Commit

Merge branch 'main' into patch-1

maximevtush authored Feb 13, 2025
2 parents a1cfba2 + 67330a1 commit a7f0777

Showing 11 changed files with 72 additions and 42 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/cargo-features.yml
@@ -19,7 +19,7 @@ concurrency:

jobs:
cargo-features:
-runs-on: ubuntu-latest
+runs-on: buildjet-8vcpu-ubuntu-2204
steps:
- uses: taiki-e/install-action@cargo-hack

1 change: 1 addition & 0 deletions .github/workflows/test.yml
@@ -19,6 +19,7 @@ concurrency:

env:
RUST_LOG: info,libp2p=off,node=error
+RUSTFLAGS: "--cfg feature=\"fee\" --cfg feature=\"marketplace\""
CARGO_TERM_COLOR: always
# Save the process compose logs
PC_LOGS: /tmp/pc.log
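
The new `RUSTFLAGS` entry passes `--cfg feature="fee"` and `--cfg feature="marketplace"` directly to rustc, so `#[cfg(feature = ...)]` gates like the ones added to `sequencer/src/run.rs` below are satisfied for every crate in the CI build, independent of Cargo's own feature resolution. A minimal standalone sketch of such a gate (illustrative only, not sequencer code):

```rust
// Compiling with `rustc --cfg 'feature="fee"' main.rs`, or in a Cargo project
// whose RUSTFLAGS contain --cfg feature="fee", selects the first definition;
// without the cfg, the second one is compiled instead.
#[cfg(feature = "fee")]
fn version_label() -> &'static str {
    "fee code path enabled"
}

#[cfg(not(feature = "fee"))]
fn version_label() -> &'static str {
    "fee code path disabled"
}

fn main() {
    println!("{}", version_label());
}
```
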
6 changes: 3 additions & 3 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion docker-compose.yaml
@@ -395,7 +395,7 @@ services:
image: ghcr.io/espressosystems/espresso-sequencer/sequencer:main
ports:
- "$ESPRESSO_SEQUENCER3_API_PORT:$ESPRESSO_SEQUENCER_API_PORT"
-command: sequencer -- http -- query --storage-fs
+command: sequencer -- http -- query -- storage-fs
environment:
- ESPRESSO_SEQUENCER_EMBEDDED_DB=false
- ESPRESSO_SEQUENCER_GENESIS_FILE
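
The fix rewrites `--storage-fs`, which reads as a flag attached to the `query` module, as a separate `-- storage-fs` section, matching the `--`-separated module style used elsewhere in this diff (see the Dockerfile `CMD` below). A toy sketch of that kind of argument splitting, assuming a `--`-separator convention and not the sequencer's actual parser:

```rust
// Toy sketch: split an argument list into "--"-separated module sections,
// which is why "-- storage-fs" and "--storage-fs" parse differently.
fn module_sections(args: &[&str]) -> Vec<Vec<String>> {
    args.split(|a| *a == "--")
        .filter(|section| !section.is_empty())
        .map(|section| section.iter().map(|a| a.to_string()).collect())
        .collect()
}

fn main() {
    let fixed = ["--", "http", "--", "query", "--", "storage-fs"];
    let broken = ["--", "http", "--", "query", "--storage-fs"];
    // [["http"], ["query"], ["storage-fs"]]: storage-fs is its own section
    println!("{:?}", module_sections(&fixed));
    // [["http"], ["query", "--storage-fs"]]: it stays attached to query
    println!("{:?}", module_sections(&broken));
}
```
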
4 changes: 1 addition & 3 deletions docker/sequencer.Dockerfile
@@ -46,8 +46,6 @@ RUN chmod +x /bin/sequencer
# configuration beyond the lifetime of the Docker container itself.
ENV ESPRESSO_SEQUENCER_STORAGE_PATH=/store/sequencer

-# We run the additional `status` and `catchup` modules by default. These are modules that require
-# minimal resources (no persistent storage) but improve the functionality of the network.
-CMD ["/bin/sequencer", "--", "http", "--", "status", "--", "catchup"]
+CMD ["/bin/sequencer", "--", "http"]
HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_SEQUENCER_API_PORT}/healthcheck || exit 1
EXPOSE ${ESPRESSO_SEQUENCER_API_PORT}
4 changes: 2 additions & 2 deletions hotshot-query-service/src/data_source/storage/ledger_log.rs
@@ -18,7 +18,7 @@ use atomic_store::{
use serde::{de::DeserializeOwned, Serialize};
use std::collections::VecDeque;
use std::fmt::Debug;
-use tracing::warn;
+use tracing::{debug, warn};

/// A caching append log for ledger objects.
#[derive(Debug)]
@@ -138,7 +138,7 @@ impl<T: Serialize + DeserializeOwned + Clone> LedgerLog<T> {
let len = self.iter().len();
let target_len = std::cmp::max(index, len);
for i in len..target_len {
warn!("storing placeholders for position {i}/{target_len}");
debug!("storing placeholders for position {i}/{target_len}");
if let Err(err) = self.store_resource(None) {
warn!("Failed to store placeholder: {}", err);
return Err(err);
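
Demoting the per-position message from `warn!` to `debug!` keeps it out of logs filtered at `info` (as the test workflow's `RUST_LOG` above is), while the genuine failure path still warns. A minimal sketch of that filtering behavior, assuming `tracing` 0.1 and `tracing-subscriber` 0.3 with its `env-filter` feature:

```rust
// With RUST_LOG=info, the debug! line below is suppressed while warn! still
// appears -- the effect of the one-word change in ledger_log.rs.
use tracing::{debug, warn};
use tracing_subscriber::EnvFilter;

fn main() {
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .init();

    let target_len = 3;
    for i in 0..target_len {
        debug!("storing placeholders for position {i}/{target_len}"); // hidden at info
    }
    warn!("Failed to store placeholder: simulated error"); // still visible
}
```
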
4 changes: 4 additions & 0 deletions hotshot/src/traits/election/helpers.rs
@@ -226,6 +226,10 @@ impl RandomOverlapQuorumIterator {
count / 2 > overlap_max,
"Overlap cannot be greater than the entire set size"
);
+assert!(
+count / 2 >= members_max - overlap_min,
+"members_max must be greater or equal to half of the count plus overlap_min"
+);

let (mut prev_rng, mut this_rng) = make_rngs(seed, round);

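
Read as arithmetic, the new assertion requires `members_max - overlap_min <= count / 2`, i.e. the non-overlapping portion of a quorum cannot exceed half of the set. A hypothetical standalone version of the check with made-up numbers (not part of the commit):

```rust
// Names mirror the diff; values are invented for illustration.
fn check_overlap_params(count: usize, members_max: usize, overlap_min: usize) {
    assert!(
        count / 2 >= members_max - overlap_min,
        "members_max ({members_max}) minus overlap_min ({overlap_min}) must not exceed count / 2 ({})",
        count / 2
    );
}

fn main() {
    check_overlap_params(10, 7, 2); // ok: 10 / 2 = 5 >= 7 - 2 = 5
    // check_overlap_params(10, 8, 2); // would panic: 5 < 8 - 2 = 6
}
```
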
39 changes: 37 additions & 2 deletions hotshot/src/traits/election/randomized_committee_members.rs
@@ -22,6 +22,7 @@ use hotshot_types::{
use hotshot_utils::anytrace::Result;
use primitive_types::U256;
use rand::{rngs::StdRng, Rng};
+use tracing::error;

use crate::traits::election::helpers::QuorumFilterConfig;

@@ -61,6 +62,36 @@ impl<TYPES: NodeType, CONFIG: QuorumFilterConfig> RandomizedCommitteeMembers<TYP
fn make_da_quorum_filter(&self, epoch: <TYPES as NodeType>::Epoch) -> BTreeSet<usize> {
CONFIG::execute(epoch.u64(), self.da_stake_table.len())
}

+/// Writes the offsets used for the quorum filter and da_quorum filter to stdout
+fn debug_display_offsets(&self) {
+/// Ensures that the quorum filters are only displayed once
+static START: std::sync::Once = std::sync::Once::new();
+
+START.call_once(|| {
+error!(
+"{} offsets for Quorum filter:",
+std::any::type_name::<CONFIG>()
+);
+for epoch in 1..=10 {
+error!(
+" epoch {epoch}: {:?}",
+self.make_quorum_filter(<TYPES as NodeType>::Epoch::new(epoch))
+);
+}
+
+error!(
+"{} offsets for DA Quorum filter:",
+std::any::type_name::<CONFIG>()
+);
+for epoch in 1..=10 {
+error!(
+" epoch {epoch}: {:?}",
+self.make_da_quorum_filter(<TYPES as NodeType>::Epoch::new(epoch))
+);
+}
+});
+}
}

impl<TYPES: NodeType, CONFIG: QuorumFilterConfig> Membership<TYPES>
@@ -114,14 +145,18 @@ impl<TYPES: NodeType, CONFIG: QuorumFilterConfig> Membership<TYPES>
.map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone()))
.collect();

-Self {
+let s = Self {
eligible_leaders,
stake_table: members,
da_stake_table: da_members,
indexed_stake_table,
indexed_da_stake_table,
_pd: PhantomData,
-}
+};
+
+s.debug_display_offsets();
+
+s
}

/// Get the stake table for the current view
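
`debug_display_offsets` relies on `std::sync::Once`, so the filter offsets are printed only the first time a committee is constructed even though `new` may run many times; logging at `error!` level presumably keeps the output visible regardless of the `RUST_LOG` filter, even though nothing has failed. A minimal sketch of the same one-shot pattern (illustrative names, not the HotShot types):

```rust
// std::sync::Once runs its closure at most once per process, so repeated calls
// print the offsets a single time.
use std::sync::Once;

fn log_offsets_once(config_name: &str, offsets: &[usize]) {
    static START: Once = Once::new();
    START.call_once(|| {
        eprintln!("{config_name} offsets for Quorum filter: {offsets:?}");
    });
}

fn main() {
    log_offsets_once("ExampleConfig", &[1, 4, 7]); // prints once
    log_offsets_once("ExampleConfig", &[1, 4, 7]); // silently skipped
}
```
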
4 changes: 4 additions & 0 deletions sequencer/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
+default = ["fee"]
testing = [
"hotshot-testing",
"marketplace-builder-core",
@@ -15,6 +16,9 @@
]
benchmarking = []
embedded-db = ["hotshot-query-service/embedded-db"]
+fee = []
+pos = []
+marketplace = []

[[bin]]
name = "espresso-dev-node"
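
With `default = ["fee"]`, a plain `cargo build` of the sequencer compiles the fee code path, while `pos` and `marketplace` must be requested explicitly (the CI workflow above also force-enables `fee` and `marketplace` through `RUSTFLAGS`). A small illustrative sketch of checking, at compile time, which of these features a binary was built with:

```rust
// cfg!(...) is evaluated at compile time, so this reports which of the newly
// declared sequencer features were enabled for this particular build.
fn compiled_in_versions() -> Vec<&'static str> {
    let mut versions = Vec::new();
    if cfg!(feature = "fee") {
        versions.push("fee");
    }
    if cfg!(feature = "pos") {
        versions.push("pos");
    }
    if cfg!(feature = "marketplace") {
        versions.push("marketplace");
    }
    versions
}

fn main() {
    println!("compiled-in versions: {:?}", compiled_in_versions());
}
```
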
44 changes: 14 additions & 30 deletions sequencer/src/persistence/sql.rs
@@ -31,9 +31,8 @@ use hotshot_query_service::{
use hotshot_types::{
consensus::CommitmentMap,
data::{
-vid_disperse::{ADVZDisperseShare, VidDisperseShare2},
-DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper,
-VidDisperseShare,
+vid_disperse::ADVZDisperseShare, DaProposal, EpochNumber, QuorumProposal, QuorumProposal2,
+QuorumProposalWrapper,
},
event::{Event, EventType, HotShotAction, LeafInfo},
message::{convert_proposal, Proposal},
@@ -583,24 +582,6 @@
gc_opt: ConsensusPruningOptions,
}

-// TODO: clean up as part of VID migration
-fn deserialize_vid_proposal_with_fallback(
-bytes: &[u8],
-) -> anyhow::Result<Proposal<SeqTypes, VidDisperseShare<SeqTypes>>> {
-bincode::deserialize(bytes).or_else(|err| {
-tracing::warn!("error decoding VID share: {err:#}");
-match bincode::deserialize::<Proposal<SeqTypes, ADVZDisperseShare<SeqTypes>>>(bytes) {
-Ok(proposal) => Ok(convert_proposal(proposal)),
-Err(err2) => {
-tracing::warn!("error decoding VID share fallback: {err2:#}");
-Err(anyhow::anyhow!(
-"Both primary and fallback deserialization failed: {err:#}, {err2:#}"
-))
-}
-}
-})
-}

impl Persistence {
/// Ensure the `leaf_hash` column is populated for all existing quorum proposals.
///
@@ -728,7 +709,9 @@ impl Persistence {
.map(|row| {
let view: i64 = row.get("view");
let data: Vec<u8> = row.get("data");
-let vid_proposal = deserialize_vid_proposal_with_fallback(&data)?;
+let vid_proposal = bincode::deserialize::<
+Proposal<SeqTypes, ADVZDisperseShare<SeqTypes>>,
+>(&data)?;
Ok((view as u64, vid_proposal.data))
})
.collect::<anyhow::Result<BTreeMap<_, _>>>()?;
@@ -1413,15 +1396,16 @@ impl Provider<SeqTypes, VidCommonRequest> for Persistence {
}
};

-let proposal = match deserialize_vid_proposal_with_fallback(&bytes) {
-Ok(proposal) => proposal,
-Err(_) => return None,
-};
+let share: Proposal<SeqTypes, ADVZDisperseShare<SeqTypes>> =
+match bincode::deserialize(&bytes) {
+Ok(share) => share,
+Err(err) => {
+tracing::warn!("error decoding VID share: {err:#}");
+return None;
+}
+};

-Some(match proposal.data {
-VidDisperseShare::V0(ADVZDisperseShare::<SeqTypes> { common, .. }) => common,
-VidDisperseShare::V1(VidDisperseShare2::<SeqTypes> { common, .. }) => common,
-})
+Some(share.data.common)
}
}

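
The retained VID-share loading code still uses the map-then-collect-into-`Result` idiom visible above: each row is decoded into an `anyhow::Result<(u64, _)>`, and collecting into `anyhow::Result<BTreeMap<_, _>>` stops at the first `bincode` failure. A simplified sketch of that idiom with toy row types (not the actual `Persistence` code):

```rust
// Toy types only: the first deserialization error aborts the whole collect and
// surfaces as an Err, mirroring the load path in sql.rs.
use std::collections::BTreeMap;

fn decode_rows(rows: &[(i64, Vec<u8>)]) -> anyhow::Result<BTreeMap<u64, String>> {
    rows.iter()
        .map(|(view, data)| {
            let decoded: String = bincode::deserialize(data)?;
            Ok((*view as u64, decoded))
        })
        .collect::<anyhow::Result<BTreeMap<_, _>>>()
}

fn main() -> anyhow::Result<()> {
    let rows = vec![
        (1_i64, bincode::serialize("first share")?),
        (2_i64, bincode::serialize("second share")?),
    ];
    println!("{:?}", decode_rows(&rows)?);
    Ok(())
}
```
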
4 changes: 4 additions & 0 deletions sequencer/src/run.rs
@@ -8,6 +8,7 @@ use super::{
persistence, Genesis, L1Params, NetworkParams,
};
use clap::Parser;
+#[allow(unused_imports)]
use espresso_types::{
traits::NullEventConsumer, FeeVersion, MarketplaceVersion, SequencerVersions,
SolverAuctionResultsProvider, V0_0,
@@ -38,6 +39,7 @@ pub async fn main() -> anyhow::Result<()> {
let upgrade = genesis.upgrade_version;

match (base, upgrade) {
+#[cfg(all(feature = "fee", feature = "marketplace"))]
(FeeVersion::VERSION, MarketplaceVersion::VERSION) => {
run(
genesis,
@@ -47,6 +49,7 @@
)
.await
}
+#[cfg(feature = "fee")]
(FeeVersion::VERSION, _) => {
run(
genesis,
@@ -56,6 +59,7 @@
)
.await
}
+#[cfg(feature = "marketplace")]
(MarketplaceVersion::VERSION, _) => {
run(
genesis,
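
Each `match` arm is now compiled only when its feature is enabled, so a build without `marketplace` simply contains no marketplace arm; the `#[allow(unused_imports)]` above quiets the version imports that become unused in such builds. A minimal sketch of cfg-gated match arms (illustrative enum, not the sequencer's version types):

```rust
// Arms disappear from the match when their feature is off; the catch-all handles
// versions that were not compiled into this binary.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
enum BaseVersion {
    Fee,
    Marketplace,
}

fn dispatch(base: BaseVersion) {
    match base {
        #[cfg(feature = "fee")]
        BaseVersion::Fee => println!("running the fee version"),
        #[cfg(feature = "marketplace")]
        BaseVersion::Marketplace => println!("running the marketplace version"),
        #[allow(unreachable_patterns)]
        other => eprintln!("version {other:?} was not compiled into this binary"),
    }
}

fn main() {
    dispatch(BaseVersion::Fee);
}
```
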
