types migration #2478

Draft: imabdulbasit wants to merge 32 commits into main from ab/leaf2-migration
Changes from 10 of 32 commits

Commits
7ac8ec9
consensus storage migration
imabdulbasit Jan 14, 2025
d58bb1d
fix migrations and commit transaction after batch insert
imabdulbasit Jan 14, 2025
d1dd38e
sqlite migration fixes and fix deserialization errors
imabdulbasit Jan 15, 2025
dbecff3
test
imabdulbasit Jan 15, 2025
27ab511
fs migration
imabdulbasit Jan 20, 2025
471f7ca
fix queries and tests
imabdulbasit Jan 20, 2025
00d48b5
merge main
imabdulbasit Jan 20, 2025
ff3d533
sequencer sqlite lock file
imabdulbasit Jan 20, 2025
c573ea6
fix postgres epoch migration
imabdulbasit Jan 20, 2025
5802184
fix undecided_state2 migration
imabdulbasit Jan 20, 2025
3885e21
bincode deserialize for migrated hashset
imabdulbasit Jan 23, 2025
2d75307
Merge remote-tracking branch 'origin/main' into ab/leaf2-migration
imabdulbasit Feb 6, 2025
0152568
query service leaf2 migration
imabdulbasit Feb 6, 2025
28063b6
Merge branch 'main' into ab/leaf2-migration
imabdulbasit Feb 6, 2025
9b80a9a
fix quorum proposal migration and storage
imabdulbasit Feb 6, 2025
4745aa6
fix fetching
imabdulbasit Feb 6, 2025
a84549d
fix leaf from proposal
imabdulbasit Feb 6, 2025
a6df3db
fix import
imabdulbasit Feb 6, 2025
946cd78
fix test_fetching_providers
imabdulbasit Feb 6, 2025
270bf0b
use TestVersions from hotshot-example-types
imabdulbasit Feb 6, 2025
bc74fd8
Merge branch 'main' into ab/leaf2-migration
imabdulbasit Feb 6, 2025
f0e9b41
fix migration completed check
imabdulbasit Feb 7, 2025
f0f9f49
call v2 methods
imabdulbasit Feb 10, 2025
d8485e2
Merge remote-tracking branch 'origin/main' into ab/leaf2-migration
imabdulbasit Feb 10, 2025
26fbdad
fix VID errors
imabdulbasit Feb 10, 2025
c01b8bf
lint
imabdulbasit Feb 10, 2025
ab4013d
cargo sort
imabdulbasit Feb 10, 2025
3b42047
fix recursion
imabdulbasit Feb 10, 2025
d8d460c
v0 and v1 availability modules
imabdulbasit Feb 10, 2025
e56475f
Merge branch 'main' into ab/leaf2-migration
imabdulbasit Feb 10, 2025
8fe27d1
cargo sort
imabdulbasit Feb 10, 2025
04dbac0
fix tests
imabdulbasit Feb 10, 2025
4 changes: 2 additions & 2 deletions Cargo.lock

(Generated file; diff not rendered by default.)

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -70,7 +70,7 @@ marketplace-builder-core = { git = "https://github.com/EspressoSystems/marketpla
marketplace-builder-shared = { git = "https://github.com/EspressoSystems/marketplace-builder-core", tag = "0.1.59" }
hotshot-events-service = { git = "https://github.com/EspressoSystems/hotshot-events-service.git", tag = "0.1.57" }
hotshot-orchestrator = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.83" }
hotshot-query-service = { git = "https://github.com/EspressoSystems/hotshot-query-service", tag = "v0.1.76" }
hotshot-query-service = { git = "https://github.com/EspressoSystems/hotshot-query-service", branch = "ab/leaf2-migration" }
hotshot-stake-table = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.83" }
hotshot-state-prover = { version = "0.1.0", path = "hotshot-state-prover" }
hotshot-task = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.83" }
2 changes: 1 addition & 1 deletion sequencer-sqlite/Cargo.lock

(Generated file; diff not rendered by default.)

54 changes: 54 additions & 0 deletions sequencer/api/migrations/postgres/V501__epoch_tables.sql
@@ -0,0 +1,54 @@
CREATE TABLE anchor_leaf2 (
view BIGINT PRIMARY KEY,
leaf BYTEA,
qc BYTEA
);


CREATE TABLE da_proposal2 (
view BIGINT PRIMARY KEY,
payload_hash VARCHAR,
data BYTEA
);

CREATE TABLE vid_share2 (
view BIGINT PRIMARY KEY,
payload_hash VARCHAR,
data BYTEA
);


CREATE TABLE undecided_state2 (
-- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or
-- update that there is only a single entry in this table: the latest known state.
id INT PRIMARY KEY,

leaves BYTEA NOT NULL,
state BYTEA NOT NULL
);

Review comment on the id column, sveitser (Collaborator), Feb 7, 2025:
Did you consider adding a constraint for only allowing the ID to be 0?

Reply from imabdulbasit (author):
Can add; these tables are just copies of the older ones.
(See the constraint sketch after this migration file.)


CREATE TABLE quorum_proposals2 (
view BIGINT PRIMARY KEY,
leaf_hash VARCHAR,
data BYTEA
);

CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals2 (leaf_hash);
CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal2 (payload_hash);
CREATE INDEX vid_share2_payload_hash_idx ON vid_share2 (payload_hash);

CREATE TABLE quorum_certificate2 (
view BIGINT PRIMARY KEY,
leaf_hash VARCHAR NOT NULL,
data BYTEA NOT NULL
);

CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate2 (leaf_hash);

CREATE TABLE epoch_migration (
table_name TEXT PRIMARY KEY,
completed bool NOT NULL DEFAULT FALSE
);

INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificate');
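
The review thread above asks about constraining undecided_state2.id to 0. A minimal sketch of what that could look like, not part of this PR; the CHECK clause and the upsert below are illustrative only, written against the undecided_state2 schema defined above:

-- Hypothetical variant of undecided_state2 with the suggested constraint: the CHECK
-- clause pins the primary key to 0, so the table can never hold more than one row.
CREATE TABLE undecided_state2 (
id INT PRIMARY KEY CHECK (id = 0),
leaves BYTEA NOT NULL,
state BYTEA NOT NULL
);

-- Writers keep the single row (the latest known state) current with an upsert:
INSERT INTO undecided_state2 (id, leaves, state)
VALUES (0, $1, $2)
ON CONFLICT (id) DO UPDATE SET leaves = excluded.leaves, state = excluded.state;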
54 changes: 54 additions & 0 deletions sequencer/api/migrations/sqlite/V301__epoch_tables.sql
@@ -0,0 +1,54 @@
CREATE TABLE anchor_leaf2 (
view BIGINT PRIMARY KEY,
leaf BLOB,
qc BLOB
);


CREATE TABLE da_proposal2 (
view BIGINT PRIMARY KEY,
payload_hash VARCHAR,
data BLOB
);

CREATE TABLE vid_share2 (
view BIGINT PRIMARY KEY,
payload_hash VARCHAR,
data BLOB
);


CREATE TABLE undecided_state2 (
-- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or
-- update that there is only a single entry in this table: the latest known state.
id INT PRIMARY KEY,

leaves BLOB NOT NULL,
state BLOB NOT NULL
);


CREATE TABLE quorum_proposals2 (
view BIGINT PRIMARY KEY,
leaf_hash VARCHAR,
data BLOB
);

CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals2 (leaf_hash);
CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal2 (payload_hash);
CREATE INDEX vid_share2_payload_hash_idx ON vid_share2 (payload_hash);

CREATE TABLE quorum_certificate2 (
view BIGINT PRIMARY KEY,
leaf_hash VARCHAR NOT NULL,
data BLOB NOT NULL
);

CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate2 (leaf_hash);

CREATE TABLE epoch_migration (
table_name TEXT PRIMARY KEY,
completed bool NOT NULL DEFAULT FALSE
);

INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificate');
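
Both migrations seed one epoch_migration row per legacy table so the data copy into the new "2" tables can be tracked and checked for completion. A plausible usage sketch, assuming the schema above; the actual statements issued by the Rust migration code may differ:

-- Skip tables that have already been migrated:
SELECT completed FROM epoch_migration WHERE table_name = 'anchor_leaf';

-- After copying anchor_leaf rows into anchor_leaf2, mark the table as done in the
-- same transaction as the batch insert, so the flag never runs ahead of the data:
UPDATE epoch_migration SET completed = TRUE WHERE table_name = 'anchor_leaf';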
39 changes: 20 additions & 19 deletions sequencer/src/api.rs
@@ -1065,8 +1065,10 @@ mod api_tests {
use hotshot_query_service::availability::{
AvailabilityDataSource, BlockQueryData, VidCommonQueryData,
};
use hotshot_types::data::{DaProposal2, EpochNumber, VidDisperseShare2};
use hotshot_types::simple_certificate::QuorumCertificate2;
use hotshot_types::{
data::{DaProposal, QuorumProposal2, VidDisperseShare},
data::QuorumProposal2,
event::LeafInfo,
message::Proposal,
simple_certificate::QuorumCertificate,
@@ -1246,31 +1248,29 @@ mod api_tests {
// Create two non-consecutive leaf chains.
let mut chain1 = vec![];

let genesis = Leaf::genesis(&Default::default(), &NodeState::mock()).await;
let genesis = Leaf2::genesis(&Default::default(), &NodeState::mock()).await;
let payload = genesis.block_payload().unwrap();
let payload_bytes_arc = payload.encode();
let disperse = vid_scheme(2).disperse(payload_bytes_arc.clone()).unwrap();
let payload_commitment = disperse.commit;
let mut quorum_proposal = QuorumProposal2::<SeqTypes> {
block_header: genesis.block_header().clone(),
view_number: ViewNumber::genesis(),
justify_qc: QuorumCertificate::genesis::<MockSequencerVersions>(
justify_qc: QuorumCertificate2::genesis::<MockSequencerVersions>(
&ValidatedState::default(),
&NodeState::mock(),
)
.await
.to_qc2(),
.await,
upgrade_certificate: None,
view_change_evidence: None,
next_drb_result: None,
next_epoch_justify_qc: None,
};
let mut qc = QuorumCertificate::genesis::<MockSequencerVersions>(
let mut qc = QuorumCertificate2::genesis::<MockSequencerVersions>(
&ValidatedState::default(),
&NodeState::mock(),
)
.await
.to_qc2();
.await;

let mut justify_qc = qc.clone();
for i in 0..5 {
@@ -1288,7 +1288,7 @@
PubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap())
.expect("Failed to sign quorum_proposal");
persistence
.append_quorum_proposal(&Proposal {
.append_quorum_proposal2(&Proposal {
data: quorum_proposal.clone(),
signature: quorum_proposal_signature,
_pd: Default::default(),
@@ -1297,33 +1297,37 @@
.unwrap();

// Include VID information for each leaf.
let share = VidDisperseShare::<SeqTypes> {
let share = VidDisperseShare2::<SeqTypes> {
view_number: leaf.view_number(),
payload_commitment,
share: disperse.shares[0].clone(),
common: disperse.common.clone(),
recipient_key: pubkey,
epoch: EpochNumber::new(0),
target_epoch: EpochNumber::new(0),
data_epoch_payload_commitment: None,
};
persistence
.append_vid(&share.to_proposal(&privkey).unwrap())
.append_vid2(&share.to_proposal(&privkey).unwrap())
.await
.unwrap();

// Include payload information for each leaf.
let block_payload_signature =
PubKey::sign(&privkey, &payload_bytes_arc).expect("Failed to sign block payload");
let da_proposal_inner = DaProposal::<SeqTypes> {
let da_proposal_inner = DaProposal2::<SeqTypes> {
encoded_transactions: payload_bytes_arc.clone(),
metadata: payload.ns_table().clone(),
view_number: leaf.view_number(),
epoch: EpochNumber::new(0),
};
let da_proposal = Proposal {
data: da_proposal_inner,
signature: block_payload_signature,
_pd: Default::default(),
};
persistence
.append_da(&da_proposal, payload_commitment)
.append_da2(&da_proposal, payload_commitment)
.await
.unwrap();
}
@@ -1369,8 +1373,8 @@ for (leaf, qc) in chain1.iter().chain(&chain2) {
for (leaf, qc) in chain1.iter().chain(&chain2) {
tracing::info!(height = leaf.height(), "check archive");
let qd = data_source.get_leaf(leaf.height() as usize).await.await;
let stored_leaf: Leaf2 = qd.leaf().clone().into();
let stored_qc = qd.qc().clone().to_qc2();
let stored_leaf: Leaf2 = qd.leaf().clone();
let stored_qc = qd.qc().clone();
assert_eq!(&stored_leaf, leaf);
assert_eq!(&stored_qc, qc);

@@ -1489,10 +1493,7 @@
.unwrap();

// Check that we still processed the leaf.
assert_eq!(
leaf,
data_source.get_leaf(1).await.await.leaf().clone().into()
);
assert_eq!(leaf, data_source.get_leaf(1).await.await.leaf().clone());
assert!(data_source.get_vid_common(1).await.is_pending());
assert!(data_source.get_block(1).await.is_pending());
}
14 changes: 7 additions & 7 deletions sequencer/src/api/sql.rs
@@ -4,7 +4,7 @@ use committable::{Commitment, Committable};
use espresso_types::{
get_l1_deposits,
v0_99::{ChainConfig, IterableFeeInfo},
BlockMerkleTree, FeeAccount, FeeMerkleTree, Leaf, Leaf2, NodeState, ValidatedState,
BlockMerkleTree, FeeAccount, FeeMerkleTree, Leaf2, NodeState, ValidatedState,
};
use hotshot::traits::ValidatedState as _;
use hotshot_query_service::{
@@ -21,7 +21,7 @@ use hotshot_query_service::{
Resolvable,
};
use hotshot_types::{
data::{QuorumProposal, ViewNumber},
data::{QuorumProposal2, ViewNumber},
message::Proposal,
traits::node_implementation::ConsensusTime,
};
@@ -257,7 +257,7 @@ async fn load_accounts<Mode: TransactionMode>(
}
}

Ok((snapshot, leaf.leaf().clone().into()))
Ok((snapshot, leaf.leaf().clone()))
}

async fn load_chain_config<Mode: TransactionMode>(
Expand Down Expand Up @@ -286,7 +286,7 @@ async fn reconstruct_state<Mode: TransactionMode>(
.get_leaf((from_height as usize).into())
.await
.context(format!("leaf {from_height} not available"))?;
let from_leaf: Leaf2 = from_leaf.leaf().clone().into();
let from_leaf: Leaf2 = from_leaf.leaf().clone();
ensure!(
from_leaf.view_number() < to_view,
"state reconstruction: starting state {:?} must be before ending state {to_view:?}",
@@ -440,13 +440,13 @@ where
P: Type<Db> + for<'q> Encode<'q, Db>,
{
let (data,) = query_as::<(Vec<u8>,)>(&format!(
"SELECT data FROM quorum_proposals WHERE {where_clause} LIMIT 1",
"SELECT data FROM quorum_proposals2 WHERE {where_clause} LIMIT 1",
))
.bind(param)
.fetch_one(tx.as_mut())
.await?;
let proposal: Proposal<SeqTypes, QuorumProposal<SeqTypes>> = bincode::deserialize(&data)?;
Ok(Leaf::from_quorum_proposal(&proposal.data).into())
let proposal: Proposal<SeqTypes, QuorumProposal2<SeqTypes>> = bincode::deserialize(&data)?;
Ok(Leaf2::from_quorum_proposal(&proposal.data))
}

#[cfg(any(test, feature = "testing"))]