This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Tvl pool staking #14775
Draft — wants to merge 31 commits into base: master
Commits (31 total; the diff below shows changes from 17 commits)
f435c64
make it mostly work, still needs a bit more
kianenigma Feb 6, 2023
8b4c90d
init
PieWol Aug 14, 2023
fdd0f52
total_balance updated
PieWol Aug 14, 2023
b80f63a
comments
PieWol Aug 14, 2023
1461e0c
Merge remote-tracking branch 'origin/master' into tvl-pool-staking
PieWol Aug 14, 2023
2e8027e
compiling rebase
PieWol Aug 14, 2023
8c69fb8
slashed_total in OnStakingUpdate::on_slash
PieWol Aug 15, 2023
6ead93a
nightly fmt
PieWol Aug 15, 2023
bcffd00
test fix
PieWol Aug 15, 2023
e936de8
remove irrelevant changes
PieWol Aug 15, 2023
6116180
tvl checks on do_try_state
PieWol Aug 15, 2023
cb69b5c
tvl migration
PieWol Aug 15, 2023
5a463c3
fix try_runtime
PieWol Aug 16, 2023
c332c0c
optimize migration
PieWol Aug 16, 2023
ec91ce2
VersionedRuntimeUpgrade
PieWol Aug 16, 2023
db59e96
removed unneccessary logs
PieWol Aug 16, 2023
35027bc
comments improved
PieWol Aug 16, 2023
5098b18
try_runtime total_balance rework
PieWol Aug 17, 2023
2d7a7b1
total_balance simplified
PieWol Aug 18, 2023
672909f
on_slash and do_try_state adjusted
PieWol Aug 18, 2023
6aa682a
test for slash tracking without subpools
PieWol Aug 18, 2023
53e99a0
test fixes and comments.
PieWol Aug 19, 2023
de98dd6
Merge branch 'paritytech:master' into tvl-pool-staking
PieWol Aug 19, 2023
4db0054
points and tvl need to stay in check.
PieWol Aug 19, 2023
4c0dece
undo accidental changes
PieWol Aug 20, 2023
db8f4af
Merge branch 'master' of github.com:paritytech/substrate into tvl-poo…
PieWol Aug 22, 2023
f0dc257
fix migration
PieWol Aug 22, 2023
ed63e99
poolmembers not relevant for TVL
PieWol Aug 25, 2023
69e259e
helper function for staking withdrawal.
PieWol Aug 25, 2023
605838c
Merge branch 'master' into tvl-pool-staking
PieWol Aug 29, 2023
6402f1f
add tvl sanity check via member total_balance
PieWol Aug 29, 2023
59 changes: 47 additions & 12 deletions frame/nomination-pools/src/lib.rs
@@ -535,6 +535,16 @@ impl<T: Config> PoolMember<T> {
}
}

/// Total balance of the member.
#[cfg(any(feature = "try-runtime", test))]
fn total_balance(&self) -> BalanceOf<T> {
if let Some(pool) = BondedPool::<T>::get(self.pool_id).defensive() {
pool.points_to_balance(self.total_points())
} else {
Zero::zero()
}
}

/// Total points of this member, both active and unbonding.
fn total_points(&self) -> BalanceOf<T> {
self.active_points().saturating_add(self.unbonding_points())
@@ -963,9 +973,13 @@ impl<T: Config> BondedPool<T> {
}

/// Issue points to [`Self`] for `new_funds`.
/// Increase the [`TotalValueLocked`] by `new_funds`.
fn issue(&mut self, new_funds: BalanceOf<T>) -> BalanceOf<T> {
let points_to_issue = self.balance_to_point(new_funds);
self.points = self.points.saturating_add(points_to_issue);
TotalValueLocked::<T>::mutate(|tvl| {
tvl.saturating_accrue(new_funds);
});
points_to_issue
}

@@ -1183,9 +1197,8 @@ impl<T: Config> BondedPool<T> {

/// Bond exactly `amount` from `who`'s funds into this pool.
///
/// If the bond type is `Create`, `Staking::bond` is called, and `who`
/// is allowed to be killed. Otherwise, `Staking::bond_extra` is called and `who`
/// cannot be killed.
/// If the bond is [`BondType::Create`], [`Staking::bond`] is called, and `who` is allowed to be
/// killed. Otherwise, [`Staking::bond_extra`] is called and `who` cannot be killed.
///
/// Returns `Ok(points_issues)`, `Err` otherwise.
fn try_bond_funds(
@@ -1416,8 +1429,8 @@ impl<T: Config> UnbondPool<T> {
new_points
}

/// Dissolve some points from the unbonding pool, reducing the balance of the pool
/// proportionally.
/// Dissolve some points from the unbonding pool, reducing the balance of the pool and the
/// [`TotalValueLocked`] proportionally.
///
/// This is the opposite of `issue`.
///
@@ -1427,6 +1440,10 @@
self.points = self.points.saturating_sub(points);
self.balance = self.balance.saturating_sub(balance_to_unbond);

TotalValueLocked::<T>::mutate(|tvl| {
tvl.saturating_reduce(balance_to_unbond);
});

balance_to_unbond
}
}
@@ -1577,6 +1594,10 @@ pub mod pallet {
type MaxUnbonding: Get<u32>;
}

/// The total value locked (TVL) across all nomination pools.
#[pallet::storage]
pub type TotalValueLocked<T: Config> = StorageValue<_, BalanceOf<T>, ValueQuery>;

/// Minimum amount to bond to join a pool.
#[pallet::storage]
pub type MinJoinBond<T: Config> = StorageValue<_, BalanceOf<T>, ValueQuery>;
@@ -1795,9 +1816,9 @@ pub mod pallet {
CannotWithdrawAny,
/// The amount does not meet the minimum bond to either join or create a pool.
///
/// The depositor can never unbond to a value less than
/// `Pallet::depositor_min_bond`. The caller does not have nominating
/// permissions for the pool. Members can never unbond to a value below `MinJoinBond`.
/// The depositor can never unbond to a value less than `Pallet::depositor_min_bond`. The
/// caller does not have nominating permissions for the pool. Members can never unbond to a
/// value below `MinJoinBond`.
MinimumBondNotMet,
/// The transaction could not be executed due to overflow risk for the pool.
OverflowRisk,
@@ -3077,6 +3098,7 @@ impl<T: Config> Pallet<T> {
bonded_pools == reward_pools,
"`BondedPools` and `RewardPools` must all have the EXACT SAME key-set."
);
let mut expected_tvl = Zero::zero();

ensure!(
SubPoolsStorage::<T>::iter_keys().all(|k| bonded_pools.contains(&k)),
@@ -3169,12 +3191,17 @@ impl<T: Config> Pallet<T> {
"depositor must always have MinCreateBond stake in the pool, except for when the \
pool is being destroyed and the depositor is the last member",
);
expected_tvl += T::Staking::total_stake(&bonded_pool.bonded_account())
.expect("all pools must have total stake");
Reviewer (Contributor): Rather than spread the try_state logic for the expected TVL across multiple areas of the try-state hook, could you please move it all together? It's fine to iterate over pools again for this purpose.

Author: I put the TVL checks into one place now, but the TVL integrity check is now done via the PoolMembers instead of all BondedPools. I would love some feedback on the way it's done now.
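For illustration, the pool-iteration variant the reviewer suggests could be consolidated roughly as in the sketch below. This is only a sketch: it mirrors the `expected_tvl` accumulation and the `assert_eq!` already present in this diff (equality is assumed from that check), uses `Self::create_bonded_account` as seen in the mock, and differs from the branch's final approach, which sums the members' `total_balance` instead.

// Sketch: compute the expected TVL in one place by iterating over all bonded
// pools again, then compare it against the tracked `TotalValueLocked`.
let expected_tvl: BalanceOf<T> = BondedPools::<T>::iter_keys()
	.map(|id| {
		T::Staking::total_stake(&Self::create_bonded_account(id))
			.expect("all pools must have total stake")
	})
	.fold(Zero::zero(), |acc: BalanceOf<T>, stake| acc.saturating_add(stake));

ensure!(
	TotalValueLocked::<T>::get() == expected_tvl,
	"`TotalValueLocked` must match the sum of all pools' total stake"
);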


Ok(())
})?;

ensure!(
MaxPoolMembers::<T>::get().map_or(true, |max| all_members <= max),
Error::<T>::MaxPoolMembers
);
assert_eq!(TotalValueLocked::<T>::get(), expected_tvl);

if level <= 1 {
return Ok(())
@@ -3269,12 +3296,18 @@ impl<T: Config> sp_staking::OnStakingUpdate<T::AccountId, BalanceOf<T>> for Pall
// anything here.
slashed_bonded: BalanceOf<T>,
slashed_unlocking: &BTreeMap<EraIndex, BalanceOf<T>>,
total_slashed: BalanceOf<T>,
) {
if let Some(pool_id) = ReversePoolIdLookup::<T>::get(pool_account) {
let mut sub_pools = match SubPoolsStorage::<T>::get(pool_id).defensive() {
if let Some(pool_id) = ReversePoolIdLookup::<T>::get(pool_account).defensive() {
// TODO: bug here: a slash toward a pool whose subpools are missing would not be
// tracked. Write a test for it.
Self::deposit_event(Event::<T>::PoolSlashed { pool_id, balance: slashed_bonded });

let mut sub_pools = match SubPoolsStorage::<T>::get(pool_id) {
Some(sub_pools) => sub_pools,
None => return,
};

for (era, slashed_balance) in slashed_unlocking.iter() {
if let Some(pool) = sub_pools.with_era.get_mut(era) {
pool.balance = *slashed_balance;
@@ -3285,9 +3318,11 @@
});
}
}

Self::deposit_event(Event::<T>::PoolSlashed { pool_id, balance: slashed_bonded });
SubPoolsStorage::<T>::insert(pool_id, sub_pools);

TotalValueLocked::<T>::mutate(|tvl| {
tvl.saturating_reduce(total_slashed);
});
}
}
}
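As a reference for the TODO above, a test exercising a slash on a pool without sub-pools could look roughly like the sketch below. It uses the `slash_to` helper added to the mock later in this diff; the `ExtBuilder`/`Runtime` names and the default pool bond of 10 are assumptions based on the existing test setup, and the actual test in the branch may differ. Note that, as the TODO says, the early return on missing sub-pools above currently skips the TVL update, so this documents the intended behaviour rather than the state at this commit.

#[test]
fn tvl_tracks_slash_without_subpools() {
	ExtBuilder::default().build_and_execute(|| {
		// The depositor's initial bond of 10 is the only stake, so TVL starts at 10.
		assert_eq!(TotalValueLocked::<Runtime>::get(), 10);

		// Slash the bonded account of pool 1 by 5; no unbonding sub-pools exist yet.
		StakingMock::slash_to(1, 5);

		// The TVL must reflect the slash even though no sub-pool entry was touched.
		assert_eq!(TotalValueLocked::<Runtime>::get(), 5);
	});
}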
95 changes: 95 additions & 0 deletions frame/nomination-pools/src/migration.rs
@@ -724,4 +724,99 @@ pub mod v5 {
Ok(())
}
}

/// This migration accumulates and initializes the [`TotalValueLocked`] for all pools.
pub struct VersionUncheckedMigrateV5ToV6<T>(sp_std::marker::PhantomData<T>);
Reviewer (Member): I hope nobody is using nomination pools on parachains, but we should mention in the migration that it uses a lot of weight and may not be suitable for parachains.

Author: Thanks for pointing it out. How do you think this mention should be done? Just a comment in the code?
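One way to address this (a sketch only; the wording is a suggestion, not code from the branch) would be to extend the migration's doc comment:

/// This migration accumulates and initializes the [`TotalValueLocked`] for all pools.
///
/// WARNING: This migration iterates over every `BondedPool` and queries the staking pallet
/// for its total stake, so its weight grows linearly with the number of pools. It may not be
/// suitable for chains with tight block weight limits, such as parachains.
pub struct VersionUncheckedMigrateV5ToV6<T>(sp_std::marker::PhantomData<T>);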

impl<T: Config> OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6<T> {
fn on_runtime_upgrade() -> Weight {
let migrated = BondedPools::<T>::count();
let tvl: BalanceOf<T> = BondedPools::<T>::iter()
.map(|(id, inner)| {
T::Staking::total_stake(&BondedPool { id, inner }.bonded_account())
.unwrap_or_default()
})
.reduce(|acc, balance| acc + balance)
.unwrap_or_default();

TotalValueLocked::<T>::set(tvl);

log!(info, "Upgraded {} pools", migrated);

// Reads: migrated * (BondedPools + Staking::total_stake) + count + onchain version.
// Writes: current version + TVL.
T::DbWeight::get().reads_writes(migrated.saturating_mul(2).saturating_add(2).into(), 2)
}

#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
ensure!(
PoolMembers::<T>::iter_keys().count() == PoolMembers::<T>::iter_values().count(),
"There are undecodable PoolMembers in storage. This migration will not fix that."
);
ensure!(
BondedPools::<T>::iter_keys().count() == BondedPools::<T>::iter_values().count(),
"There are undecodable BondedPools in storage. This migration will not fix that."
);
ensure!(
SubPoolsStorage::<T>::iter_keys().count() ==
SubPoolsStorage::<T>::iter_values().count(),
"There are undecodable SubPools in storage. This migration will not fix that."
);
ensure!(
Metadata::<T>::iter_keys().count() == Metadata::<T>::iter_values().count(),
"There are undecodable Metadata in storage. This migration will not fix that."
);

Ok(Vec::new())
}

#[cfg(feature = "try-runtime")]
fn post_upgrade(data: Vec<u8>) -> Result<(), TryRuntimeError> {
// Ensure [`TotalValueLocked`] contains a value greater than zero if any instances of
// `BondedPools` exist.
if !BondedPools::<T>::count().is_zero() {
ensure!(!TotalValueLocked::<T>::get().is_zero(), "tvl written incorrectly");
}

ensure!(
Pallet::<T>::on_chain_storage_version() >= 6,
"nomination-pools::migration::v6: wrong storage version"
);

// These should not have been touched - just in case.
ensure!(
PoolMembers::<T>::iter_keys().count() == PoolMembers::<T>::iter_values().count(),
"There are undecodable PoolMembers in storage."
);
ensure!(
BondedPools::<T>::iter_keys().count() == BondedPools::<T>::iter_values().count(),
"There are undecodable BondedPools in storage."
);
ensure!(
SubPoolsStorage::<T>::iter_keys().count() ==
SubPoolsStorage::<T>::iter_values().count(),
"There are undecodable SubPools in storage."
);
ensure!(
Metadata::<T>::iter_keys().count() == Metadata::<T>::iter_values().count(),
"There are undecodable Metadata in storage."
);

Ok(())
}
}

/// [`VersionUncheckedMigrateV5ToV6`] wrapped in a
/// [`frame_support::migrations::VersionedRuntimeUpgrade`], ensuring the migration is only
/// performed when the on-chain version is 5.
#[cfg(feature = "experimental")]
pub type VersionCheckedMigrateV5ToV6<T> = frame_support::migrations::VersionedRuntimeUpgrade<
5,
6,
VersionUncheckedMigrateV5ToV6<T>,
crate::pallet::Pallet<T>,
<T as frame_system::Config>::DbWeight,
>;
}
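For context, a runtime would typically plug the versioned wrapper into its migrations tuple roughly as follows. This is a sketch only: `Runtime`, the exact module path, and the surrounding `Executive` setup are assumptions and not part of this diff, and the wrapper is gated behind frame-support's `experimental` feature as shown above.

// Sketch: include the versioned migration in the runtime's migration list so the
// upgrade only runs when the pallet's on-chain storage version is 5.
pub type Migrations = (
	pallet_nomination_pools::migration::VersionCheckedMigrateV5ToV6<Runtime>,
	// ... other runtime migrations
);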
43 changes: 30 additions & 13 deletions frame/nomination-pools/src/mock.rs
@@ -3,7 +3,7 @@ use crate::{self as pools};
use frame_support::{assert_ok, parameter_types, PalletId};
use frame_system::RawOrigin;
use sp_runtime::{BuildStorage, FixedU128};
use sp_staking::Stake;
use sp_staking::{OnStakingUpdate, Stake};

pub type BlockNumber = u64;
pub type AccountId = u128;
@@ -28,7 +28,8 @@ parameter_types! {
pub static CurrentEra: EraIndex = 0;
pub static BondingDuration: EraIndex = 3;
pub storage BondedBalanceMap: BTreeMap<AccountId, Balance> = Default::default();
pub storage UnbondingBalanceMap: BTreeMap<AccountId, Balance> = Default::default();
// Map from a user to a vec of (era, amount) pairs, where `amount` unlocks in that era.
pub storage UnbondingBalanceMap: BTreeMap<AccountId, Vec<(EraIndex, Balance)>> = Default::default();
#[derive(Clone, PartialEq)]
pub static MaxUnbonding: u32 = 8;
pub static StakingMinBond: Balance = 10;
@@ -42,6 +43,14 @@ impl StakingMock {
x.insert(who, bonded);
BondedBalanceMap::set(&x)
}

pub(crate) fn slash_to(pool_id: PoolId, amount: Balance) {
let acc = Pools::create_bonded_account(pool_id);
let bonded = BondedBalanceMap::get();
let pre_total = bonded.get(&acc).unwrap();
Self::set_bonded_balance(acc, pre_total - amount);
Pools::on_slash(&acc, pre_total - amount, &Default::default(), amount);
}
}

impl sp_staking::StakingInterface for StakingMock {
@@ -87,8 +96,11 @@ impl sp_staking::StakingInterface for StakingMock {
let mut x = BondedBalanceMap::get();
*x.get_mut(who).unwrap() = x.get_mut(who).unwrap().saturating_sub(amount);
BondedBalanceMap::set(&x);

let era = Self::current_era();
let unlocking_at = era + Self::bonding_duration();
let mut y = UnbondingBalanceMap::get();
*y.entry(*who).or_insert(Self::Balance::zero()) += amount;
y.entry(*who).or_insert(Default::default()).push((unlocking_at, amount));
UnbondingBalanceMap::set(&y);
Ok(())
}
@@ -98,11 +110,13 @@ impl sp_staking::StakingInterface for StakingMock {
}

fn withdraw_unbonded(who: Self::AccountId, _: u32) -> Result<bool, DispatchError> {
// Simulates removing unlocking chunks and only having the bonded balance locked
let mut x = UnbondingBalanceMap::get();
x.remove(&who);
UnbondingBalanceMap::set(&x);
let mut unbonding_map = UnbondingBalanceMap::get();
let staker_map = unbonding_map.get_mut(&who).ok_or("not a staker")?;

let current_era = Self::current_era();
staker_map.retain(|(unlocking_at, _amount)| *unlocking_at > current_era);

UnbondingBalanceMap::set(&unbonding_map);
Ok(UnbondingBalanceMap::get().is_empty() && BondedBalanceMap::get().is_empty())
}

@@ -126,14 +140,17 @@ impl sp_staking::StakingInterface for StakingMock {
}

fn stake(who: &Self::AccountId) -> Result<Stake<Balance>, DispatchError> {
match (
UnbondingBalanceMap::get().get(who).copied(),
BondedBalanceMap::get().get(who).copied(),
) {
match (UnbondingBalanceMap::get().get(who), BondedBalanceMap::get().get(who).copied()) {
(None, None) => Err(DispatchError::Other("balance not found")),
(Some(v), None) => Ok(Stake { total: v, active: 0 }),
(Some(v), None) => Ok(Stake {
total: v.into_iter().fold(0u128, |acc, &x| acc.saturating_add(x.1)),
active: 0,
}),
(None, Some(v)) => Ok(Stake { total: v, active: v }),
(Some(a), Some(b)) => Ok(Stake { total: a + b, active: b }),
(Some(a), Some(b)) => Ok(Stake {
total: a.into_iter().fold(0u128, |acc, &x| acc.saturating_add(x.1)) + b,
active: b,
}),
}
}
