Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

backing: move the min votes threshold to the runtime #7577

Draft
wants to merge 6 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
58 changes: 43 additions & 15 deletions node/core/backing/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,8 +80,8 @@ use futures::{

use error::{Error, FatalResult};
use polkadot_node_primitives::{
minimum_votes, AvailableData, InvalidCandidate, PoV, SignedFullStatementWithPVD,
StatementWithPVD, ValidationResult,
AvailableData, InvalidCandidate, PoV, SignedFullStatementWithPVD, StatementWithPVD,
ValidationResult,
};
use polkadot_node_subsystem::{
messages::{
Expand All @@ -96,8 +96,7 @@ use polkadot_node_subsystem::{
use polkadot_node_subsystem_util::{
self as util,
backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView},
request_from_runtime, request_session_index_for_child, request_validator_groups,
request_validators,
request_from_runtime, request_validator_groups, request_validators,
runtime::{prospective_parachains_mode, ProspectiveParachainsMode},
Validator,
};
Expand All @@ -116,6 +115,7 @@ use statement_table::{
},
Config as TableConfig, Context as TableContextTrait, Table,
};
use util::runtime::{request_min_backing_votes, RuntimeInfo};

mod error;

Expand Down Expand Up @@ -219,6 +219,8 @@ struct PerRelayParentState {
awaiting_validation: HashSet<CandidateHash>,
/// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`.
fallbacks: HashMap<CandidateHash, AttestingData>,
/// The minimum backing votes threshold.
minimum_backing_votes: u32,
}

struct PerCandidateState {
Expand Down Expand Up @@ -275,6 +277,8 @@ struct State {
background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
/// The handle to the keystore used for signing.
keystore: KeystorePtr,
/// Runtime info cached per-session.
runtime_info: RuntimeInfo,
}

impl State {
Expand All @@ -289,6 +293,7 @@ impl State {
per_candidate: HashMap::new(),
background_validation_tx,
keystore,
runtime_info: RuntimeInfo::new(None),
}
}
}
Expand Down Expand Up @@ -400,8 +405,8 @@ impl TableContextTrait for TableContext {
self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority))
}

fn requisite_votes(&self, group: &ParaId) -> usize {
self.groups.get(group).map_or(usize::MAX, |g| minimum_votes(g.len()))
fn get_group_size(&self, group: &ParaId) -> Option<usize> {
self.groups.get(group).map(|g| g.len())
}
}

Expand Down Expand Up @@ -943,7 +948,14 @@ async fn handle_active_leaves_update<Context>(

// construct a `PerRelayParent` from the runtime API
// and insert it.
let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?;
let per = construct_per_relay_parent_state(
ctx,
maybe_new,
&state.keystore,
&mut state.runtime_info,
mode,
)
.await?;

if let Some(per) = per {
state.per_relay_parent.insert(maybe_new, per);
Expand All @@ -959,6 +971,7 @@ async fn construct_per_relay_parent_state<Context>(
ctx: &mut Context,
relay_parent: Hash,
keystore: &KeystorePtr,
runtime_info: &mut RuntimeInfo,
mode: ProspectiveParachainsMode,
) -> Result<Option<PerRelayParentState>, Error> {
macro_rules! try_runtime_api {
Expand All @@ -983,10 +996,18 @@ async fn construct_per_relay_parent_state<Context>(

let parent = relay_parent;

let (validators, groups, session_index, cores) = futures::try_join!(
let session_index =
try_runtime_api!(runtime_info.get_session_index_for_child(ctx.sender(), parent).await);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok, this wasn't introduced by you, but this macro completely defeats the point of our error handling. If we don't want to bring down the node, the correct way of handling this would be to make all runtime errors non-fatal, instead of swallowing them in a macro.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The only fatal error we have here is `Canceled`, which never happens except when the node is shutting down, so this is fine.


let minimum_backing_votes =
request_min_backing_votes(parent, ctx.sender(), |parent, sender| {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we need this wrapper? Can't `get_min_backing_votes` already fall back to the constant in case the runtime API is unsupported?

runtime_info.get_min_backing_votes(sender, session_index, parent)
})
.await?;

let (validators, groups, cores) = futures::try_join!(
request_validators(parent, ctx.sender()).await,
request_validator_groups(parent, ctx.sender()).await,
request_session_index_for_child(parent, ctx.sender()).await,
request_from_runtime(parent, ctx.sender(), |tx| {
RuntimeApiRequest::AvailabilityCores(tx)
},)
Expand All @@ -996,7 +1017,6 @@ async fn construct_per_relay_parent_state<Context>(

let validators: Vec<_> = try_runtime_api!(validators);
let (validator_groups, group_rotation_info) = try_runtime_api!(groups);
let session_index = try_runtime_api!(session_index);
let cores = try_runtime_api!(cores);

let signing_context = SigningContext { parent_hash: parent, session_index };
Expand Down Expand Up @@ -1061,6 +1081,7 @@ async fn construct_per_relay_parent_state<Context>(
issued_statements: HashSet::new(),
awaiting_validation: HashSet::new(),
fallbacks: HashMap::new(),
minimum_backing_votes,
}))
}

Expand Down Expand Up @@ -1563,10 +1584,13 @@ async fn post_import_statement_actions<Context>(
rp_state: &mut PerRelayParentState,
summary: Option<&TableSummary>,
) -> Result<(), Error> {
if let Some(attested) = summary
.as_ref()
.and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context))
{
if let Some(attested) = summary.as_ref().and_then(|s| {
rp_state.table.attested_candidate(
&s.candidate,
&rp_state.table_context,
rp_state.minimum_backing_votes,
)
}) {
let candidate_hash = attested.candidate.hash();

// `HashSet::insert` returns true if the thing wasn't in there already.
Expand Down Expand Up @@ -2009,7 +2033,11 @@ fn handle_get_backed_candidates_message(
};
rp_state
.table
.attested_candidate(&candidate_hash, &rp_state.table_context)
.attested_candidate(
&candidate_hash,
&rp_state.table_context,
rp_state.minimum_backing_votes,
)
.and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context))
})
.collect();
Expand Down
49 changes: 39 additions & 10 deletions node/core/backing/src/tests/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,7 @@ struct TestState {
head_data: HashMap<ParaId, HeadData>,
signing_context: SigningContext,
relay_parent: Hash,
minimum_backing_votes: u32,
}

impl TestState {
Expand Down Expand Up @@ -150,6 +151,7 @@ impl Default for TestState {
validation_data,
signing_context,
relay_parent,
minimum_backing_votes: 2,
}
}
}
Expand Down Expand Up @@ -250,33 +252,60 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS
}
);

// Check that subsystem job issues a request for a validator set.
// Check that subsystem job issues a request for the session index for child.
assert_matches!(
virtual_overseer.recv().await,
AllMessages::RuntimeApi(
RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
) if parent == test_state.relay_parent => {
tx.send(Ok(test_state.validator_public.clone())).unwrap();
tx.send(Ok(test_state.signing_context.session_index)).unwrap();
}
);

// Check that subsystem job issues a request for the validator groups.
// Check that subsystem job issues a request for the runtime API version.
// assert_matches!(
// virtual_overseer.recv().await,
// AllMessages::RuntimeApi(
// RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx))
// ) if parent == test_state.relay_parent => {
// tx.send(Ok(RuntimeApiRequest::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT)).unwrap();
// }
// );

// Check if subsystem job issues a request for the minimum backing votes.
// This may or may not happen, depending if the minimum backing votes is already cached in the
// RuntimeInfo.
let next_message = {
let msg = virtual_overseer.recv().await;
match msg {
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
parent,
RuntimeApiRequest::MinimumBackingVotes(tx),
)) if parent == test_state.relay_parent => {
tx.send(Ok(test_state.minimum_backing_votes)).unwrap();
virtual_overseer.recv().await
},
_ => msg,
}
};

// Check that subsystem job issues a request for a validator set.
assert_matches!(
virtual_overseer.recv().await,
next_message,
AllMessages::RuntimeApi(
RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx))
RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
) if parent == test_state.relay_parent => {
tx.send(Ok(test_state.validator_groups.clone())).unwrap();
tx.send(Ok(test_state.validator_public.clone())).unwrap();
}
);

// Check that subsystem job issues a request for the session index for child.
// Check that subsystem job issues a request for the validator groups.
assert_matches!(
virtual_overseer.recv().await,
AllMessages::RuntimeApi(
RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx))
) if parent == test_state.relay_parent => {
tx.send(Ok(test_state.signing_context.session_index)).unwrap();
tx.send(Ok(test_state.validator_groups.clone())).unwrap();
}
);

Expand Down
40 changes: 29 additions & 11 deletions node/core/backing/src/tests/prospective_parachains.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,13 +138,41 @@ async fn activate_leaf(
}

for (hash, number) in ancestry_iter.take(requested_len) {
// Check that subsystem job issues a request for a validator set.
let msg = match next_overseer_message.take() {
Some(msg) => msg,
None => virtual_overseer.recv().await,
};

// Check that subsystem job issues a request for the session index for child.
assert_matches!(
msg,
AllMessages::RuntimeApi(
RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
) if parent == hash => {
tx.send(Ok(test_state.signing_context.session_index)).unwrap();
}
);

// Check if subsystem job issues a request for the minimum backing votes.
// This may or may not happen, depending if the minimum backing votes is already cached in
// the `RuntimeInfo`.
let next_message = {
let msg = virtual_overseer.recv().await;
match msg {
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
parent,
RuntimeApiRequest::MinimumBackingVotes(tx),
)) if parent == hash => {
tx.send(Ok(test_state.minimum_backing_votes)).unwrap();
virtual_overseer.recv().await
},
_ => msg,
}
};

// Check that subsystem job issues a request for a validator set.
assert_matches!(
next_message,
AllMessages::RuntimeApi(
RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
) if parent == hash => {
Expand All @@ -164,16 +192,6 @@ async fn activate_leaf(
}
);

// Check that subsystem job issues a request for the session index for child.
assert_matches!(
virtual_overseer.recv().await,
AllMessages::RuntimeApi(
RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
) if parent == hash => {
tx.send(Ok(test_state.signing_context.session_index)).unwrap();
}
);

// Check that subsystem job issues a request for the availability cores.
assert_matches!(
virtual_overseer.recv().await,
Expand Down
15 changes: 15 additions & 0 deletions node/core/runtime-api/src/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ const DEFAULT_CACHE_CAP: NonZeroUsize = match NonZeroUsize::new(128) {
pub(crate) struct RequestResultCache {
authorities: LruCache<Hash, Vec<AuthorityDiscoveryId>>,
validators: LruCache<Hash, Vec<ValidatorId>>,
minimum_backing_votes: LruCache<Hash, u32>,
validator_groups: LruCache<Hash, (Vec<Vec<ValidatorIndex>>, GroupRotationInfo)>,
availability_cores: LruCache<Hash, Vec<CoreState>>,
persisted_validation_data:
Expand Down Expand Up @@ -78,6 +79,7 @@ impl Default for RequestResultCache {
Self {
authorities: LruCache::new(DEFAULT_CACHE_CAP),
validators: LruCache::new(DEFAULT_CACHE_CAP),
minimum_backing_votes: LruCache::new(DEFAULT_CACHE_CAP),
validator_groups: LruCache::new(DEFAULT_CACHE_CAP),
availability_cores: LruCache::new(DEFAULT_CACHE_CAP),
persisted_validation_data: LruCache::new(DEFAULT_CACHE_CAP),
Expand Down Expand Up @@ -131,6 +133,18 @@ impl RequestResultCache {
self.validators.put(relay_parent, validators);
}

pub(crate) fn minimum_backing_votes(&mut self, relay_parent: &Hash) -> Option<u32> {
self.minimum_backing_votes.get(relay_parent).copied()
}

pub(crate) fn cache_minimum_backing_votes(
&mut self,
relay_parent: Hash,
minimum_backing_votes: u32,
) {
self.minimum_backing_votes.put(relay_parent, minimum_backing_votes);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No. This should be cached per `SessionIndex`, not per relay parent, as it is only supposed to change at session boundaries.

}

pub(crate) fn validator_groups(
&mut self,
relay_parent: &Hash,
Expand Down Expand Up @@ -472,6 +486,7 @@ pub(crate) enum RequestResult {
// The structure of each variant is (relay_parent, [params,]*, result)
Authorities(Hash, Vec<AuthorityDiscoveryId>),
Validators(Hash, Vec<ValidatorId>),
MinimumBackingVotes(Hash, u32),
ValidatorGroups(Hash, (Vec<Vec<ValidatorIndex>>, GroupRotationInfo)),
AvailabilityCores(Hash, Vec<CoreState>),
PersistedValidationData(Hash, ParaId, OccupiedCoreAssumption, Option<PersistedValidationData>),
Expand Down
11 changes: 11 additions & 0 deletions node/core/runtime-api/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,9 @@ where
self.requests_cache.cache_authorities(relay_parent, authorities),
Validators(relay_parent, validators) =>
self.requests_cache.cache_validators(relay_parent, validators),
MinimumBackingVotes(relay_parent, minimum_backing_votes) => self
.requests_cache
.cache_minimum_backing_votes(relay_parent, minimum_backing_votes),
ValidatorGroups(relay_parent, groups) =>
self.requests_cache.cache_validator_groups(relay_parent, groups),
AvailabilityCores(relay_parent, cores) =>
Expand Down Expand Up @@ -301,6 +304,8 @@ where
Request::StagingAsyncBackingParams(sender) =>
query!(staging_async_backing_params(), sender)
.map(|sender| Request::StagingAsyncBackingParams(sender)),
Request::MinimumBackingVotes(sender) => query!(minimum_backing_votes(), sender)
.map(|sender| Request::MinimumBackingVotes(sender)),
}
}

Expand Down Expand Up @@ -551,6 +556,12 @@ where
ver = Request::SUBMIT_REPORT_DISPUTE_LOST_RUNTIME_REQUIREMENT,
sender
),
Request::MinimumBackingVotes(sender) => query!(
MinimumBackingVotes,
minimum_backing_votes(),
ver = Request::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT,
sender
),

Request::StagingParaBackingState(para, sender) => {
query!(
Expand Down
4 changes: 4 additions & 0 deletions node/core/runtime-api/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -264,6 +264,10 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient {
) -> Result<Option<vstaging::BackingState>, ApiError> {
todo!("Not required for tests")
}

async fn minimum_backing_votes(&self, _: Hash) -> Result<u32, ApiError> {
todo!("Not required for tests")
}
}

#[test]
Expand Down