add explicit_into_iter_loop clippy lint (paradigmxyz#8569)
tcoratger authored and mw2000 committed Jun 5, 2024
1 parent 270b5e0 commit f0a88d2
Showing 20 changed files with 32 additions and 32 deletions.
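
For context, `explicit_into_iter_loop` is a clippy lint from the `pedantic` group. It flags `for` loops that call `.into_iter()` explicitly on the value being iterated: the `for` desugaring already calls `IntoIterator::into_iter`, so the explicit call is redundant. A minimal sketch of the before/after shape this commit applies throughout (illustrative code, not taken from the diff):

```rust
fn main() {
    let hashes = vec![0xA1u64, 0xB2, 0xC3];

    // Before: warns under `#[warn(clippy::explicit_into_iter_loop)]`,
    // because the loop desugars to `IntoIterator::into_iter(hashes)` anyway:
    //
    //     for hash in hashes.into_iter() { /* ... */ }

    // After: semantically identical, lint-free.
    for hash in hashes {
        println!("{hash:#x}");
    }
}
```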
1 change: 1 addition & 0 deletions Cargo.toml
@@ -160,6 +160,7 @@ match_same_arms = "warn"
doc_markdown = "warn"
unnecessary_struct_initialization = "warn"
string_lit_as_bytes = "warn"
+ explicit_into_iter_loop = "warn"
type_repetition_in_bounds = "warn"

# These are nursery lints which have findings. Allow them for now. Some are not
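These lint levels live in the workspace-level `Cargo.toml`, presumably under a `[workspace.lints.clippy]` table; member crates inherit them through `lints.workspace = true` in their own manifests, which is how this one-line addition reaches all of the crates touched below.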
2 changes: 1 addition & 1 deletion crates/blockchain-tree/src/block_indices.rs
@@ -328,7 +328,7 @@ impl BlockIndices {

let mut lose_chains = BTreeSet::new();

- for block_hash in finalized_blocks.into_iter() {
+ for block_hash in finalized_blocks {
// there is a fork block.
if let Some(fork_blocks) = self.fork_to_child.remove(&block_hash) {
lose_chains = fork_blocks.into_iter().fold(lose_chains, |mut fold, fork_child| {
8 changes: 4 additions & 4 deletions crates/blockchain-tree/src/blockchain_tree.rs
@@ -897,7 +897,7 @@ where
hashes: impl IntoIterator<Item = impl Into<BlockNumHash>>,
) -> ProviderResult<()> {
// check unconnected block buffer for children of the canonical hashes
- for added_block in hashes.into_iter() {
+ for added_block in hashes {
self.try_connect_buffered_blocks(added_block.into())
}

@@ -908,7 +908,7 @@
all_chain_blocks.push(BlockNumHash { number, hash: block.hash() })
}
}
- for block in all_chain_blocks.into_iter() {
+ for block in all_chain_blocks {
self.try_connect_buffered_blocks(block)
}

@@ -927,7 +927,7 @@
// first remove all the children of the new block from the buffer
let include_blocks = self.state.buffered_blocks.remove_block_with_children(&new_block.hash);
// then try to reinsert them into the tree
- for block in include_blocks.into_iter() {
+ for block in include_blocks {
// don't fail on error, just ignore the block.
let _ = self
.try_insert_validated_block(block, BlockValidationKind::SkipStateRootValidation)
@@ -1506,7 +1506,7 @@ mod tests {
}
if let Some(fork_to_child) = self.fork_to_child {
let mut x: HashMap<BlockHash, LinkedHashSet<BlockHash>> = HashMap::new();
- for (key, hash_set) in fork_to_child.into_iter() {
+ for (key, hash_set) in fork_to_child {
x.insert(key, hash_set.into_iter().collect());
}
assert_eq!(*tree.state.block_indices.fork_to_child(), x);
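Note that the first hunk in this file rewrites a loop over a generic `hashes: impl IntoIterator<Item = impl Into<BlockNumHash>>` parameter: the lint applies to generic arguments just as it does to concrete collections, because the desugaring is identical either way. A small sketch of that shape (names here are illustrative, not from the codebase):

```rust
/// Consumes any iterable of ids; the loop header needs no `.into_iter()`.
fn connect_all(ids: impl IntoIterator<Item = impl Into<u64>>) {
    // Writing `ids.into_iter()` here would trip `explicit_into_iter_loop`.
    for id in ids {
        let id: u64 = id.into();
        println!("connecting {id}");
    }
}

fn main() {
    connect_all([1u32, 2, 3]);
}
```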
4 changes: 2 additions & 2 deletions crates/net/discv4/src/config.rs
@@ -91,7 +91,7 @@ impl Discv4Config {
&mut self,
pairs: impl IntoIterator<Item = (impl Into<Vec<u8>>, Bytes)>,
) -> &mut Self {
- for (k, v) in pairs.into_iter() {
+ for (k, v) in pairs {
self.add_eip868_rlp_pair(k, v);
}
self
@@ -252,7 +252,7 @@ impl Discv4ConfigBuilder {
&mut self,
pairs: impl IntoIterator<Item = (impl Into<Vec<u8>>, Bytes)>,
) -> &mut Self {
- for (k, v) in pairs.into_iter() {
+ for (k, v) in pairs {
self.add_eip868_rlp_pair(k, v);
}
self
2 changes: 1 addition & 1 deletion crates/net/discv4/src/lib.rs
@@ -948,7 +948,7 @@ impl Discv4Service {
///
/// See [`Self::add_node`]
pub fn add_all_nodes(&mut self, records: impl IntoIterator<Item = NodeRecord>) {
- for record in records.into_iter() {
+ for record in records {
self.add_node(record);
}
}
2 changes: 1 addition & 1 deletion crates/net/network/src/cache.rs
@@ -90,7 +90,7 @@ where
T: Eq + Hash + fmt::Debug,
{
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- for item in iter.into_iter() {
+ for item in iter {
_ = self.insert(item);
}
}
2 changes: 1 addition & 1 deletion crates/net/network/src/state.rs
@@ -175,7 +175,7 @@ where
let mut peers: Vec<_> = self.active_peers.iter_mut().collect();
peers.shuffle(&mut rand::thread_rng());

- for (peer_id, peer) in peers.into_iter() {
+ for (peer_id, peer) in peers {
if peer.blocks.contains(&msg.hash) {
// skip peers which already reported the block
continue
4 changes: 2 additions & 2 deletions crates/net/network/src/transactions/fetcher.rs
@@ -374,7 +374,7 @@ impl TransactionFetcher {
pub fn buffer_hashes(&mut self, hashes: RequestTxHashes, fallback_peer: Option<PeerId>) {
let mut max_retried_and_evicted_hashes = vec![];

- for hash in hashes.into_iter() {
+ for hash in hashes {
// hash could have been evicted from bounded lru map
if self.hashes_fetch_inflight_and_pending_fetch.peek(&hash).is_none() {
continue
@@ -665,7 +665,7 @@ impl TransactionFetcher {
if self.hashes_pending_fetch.contains(hash) {
debug!(target: "net::tx", "`{}` should have been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and `@inflight_requests`, `@hashes_fetch_inflight_and_pending_fetch` for `{}`: {:?}",
format!("{:?}", new_announced_hashes), // Assuming new_announced_hashes can be debug-printed directly
format!("{:?}", new_announced_hashes),
format!("{:?}", new_announced_hashes),
new_announced_hashes.iter().map(|hash| {
let metadata = self.hashes_fetch_inflight_and_pending_fetch.get(hash);
// Assuming you only need `retries` and `tx_encoded_length` for debugging
2 changes: 1 addition & 1 deletion crates/net/p2p/src/full_block.rs
@@ -409,7 +409,7 @@ where

/// Inserts multiple block bodies.
fn insert_bodies(&mut self, bodies: impl IntoIterator<Item = BodyResponse>) {
- for body in bodies.into_iter() {
+ for body in bodies {
self.insert_body(body);
}
}
2 changes: 1 addition & 1 deletion crates/primitives/src/stage/checkpoints.rs
@@ -48,7 +48,7 @@ impl Compact for MerkleCheckpoint {

buf.put_u16(self.walker_stack.len() as u16);
len += 2;
- for item in self.walker_stack.into_iter() {
+ for item in self.walker_stack {
len += item.to_compact(buf);
}

2 changes: 1 addition & 1 deletion crates/rpc/rpc/src/eth/api/transactions.rs
@@ -613,7 +613,7 @@ where

let mut evm = self.inner.evm_config.evm_with_env(db, env);
let mut index = 0;
- for tx in transactions.into_iter() {
+ for tx in transactions {
if tx.hash() == target_tx_hash {
// reached the target transaction
break
2 changes: 1 addition & 1 deletion crates/stages/stages/src/stages/hashing_account.rs
@@ -165,7 +165,7 @@ impl<DB: Database> Stage<DB> for AccountHashingStage {
let chunk = chunk.collect::<Result<Vec<_>, _>>()?;
// Spawn the hashing task onto the global rayon pool
rayon::spawn(move || {
- for (address, account) in chunk.into_iter() {
+ for (address, account) in chunk {
let address = address.key().unwrap();
let _ = tx.send((RawKey::new(keccak256(address)), account));
}
2 changes: 1 addition & 1 deletion crates/stages/stages/src/stages/hashing_storage.rs
@@ -104,7 +104,7 @@ impl<DB: Database> Stage<DB> for StorageHashingStage {
let chunk = chunk.collect::<Result<Vec<_>, _>>()?;
// Spawn the hashing task onto the global rayon pool
rayon::spawn(move || {
- for (address, slot) in chunk.into_iter() {
+ for (address, slot) in chunk {
let mut addr_key = Vec::with_capacity(64);
addr_key.put_slice(keccak256(address).as_slice());
addr_key.put_slice(keccak256(slot.key).as_slice());
4 changes: 2 additions & 2 deletions crates/stages/stages/src/stages/merkle.rs
@@ -631,8 +631,8 @@ mod tests {
.or_default()
.insert(keccak256(entry.key), entry.value);
}
- for (hashed_address, storage) in tree.into_iter() {
- for (hashed_slot, value) in storage.into_iter() {
+ for (hashed_address, storage) in tree {
+ for (hashed_slot, value) in storage {
let storage_entry = storage_cursor
.seek_by_key_subkey(hashed_address, hashed_slot)
.unwrap();
8 changes: 4 additions & 4 deletions crates/storage/provider/src/bundle_state/state_changes.rs
@@ -31,7 +31,7 @@ impl StateChanges {
tracing::trace!(target: "provider::bundle_state", len = self.0.accounts.len(), "Writing new account state");
let mut accounts_cursor = tx.cursor_write::<tables::PlainAccountState>()?;
// write account to database.
- for (address, account) in self.0.accounts.into_iter() {
+ for (address, account) in self.0.accounts {
if let Some(account) = account {
tracing::trace!(target: "provider::bundle_state", ?address, "Updating plain state account");
accounts_cursor.upsert(address, into_reth_acc(account))?;
@@ -44,14 +44,14 @@
// Write bytecode
tracing::trace!(target: "provider::bundle_state", len = self.0.contracts.len(), "Writing bytecodes");
let mut bytecodes_cursor = tx.cursor_write::<tables::Bytecodes>()?;
- for (hash, bytecode) in self.0.contracts.into_iter() {
+ for (hash, bytecode) in self.0.contracts {
bytecodes_cursor.upsert(hash, Bytecode(bytecode))?;
}

// Write new storage state and wipe storage if needed.
tracing::trace!(target: "provider::bundle_state", len = self.0.storage.len(), "Writing new storage state");
let mut storages_cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
- for PlainStorageChangeset { address, wipe_storage, storage } in self.0.storage.into_iter() {
+ for PlainStorageChangeset { address, wipe_storage, storage } in self.0.storage {
// Wiping of storage.
if wipe_storage && storages_cursor.seek_exact(address)?.is_some() {
storages_cursor.delete_current_duplicates()?;
@@ -64,7 +64,7 @@
// sort storage slots by key.
storage.par_sort_unstable_by_key(|a| a.key);

- for entry in storage.into_iter() {
+ for entry in storage {
tracing::trace!(target: "provider::bundle_state", ?address, ?entry.key, "Updating plain state storage");
if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? {
if db_entry.key == entry.key {
3 changes: 1 addition & 2 deletions crates/storage/provider/src/bundle_state/state_reverts.rs
@@ -39,8 +39,7 @@ impl StateReverts {
tracing::trace!(target: "provider::reverts", block_number, "Writing block change");
// sort changes by address.
storage_changes.par_sort_unstable_by_key(|a| a.address);
- for PlainStorageRevert { address, wiped, storage_revert } in storage_changes.into_iter()
- {
+ for PlainStorageRevert { address, wiped, storage_revert } in storage_changes {
let storage_id = BlockNumberAddress((block_number, address));

let mut storage = storage_revert
2 changes: 1 addition & 1 deletion crates/storage/provider/src/providers/database/provider.rs
@@ -524,7 +524,7 @@ impl<TX: DbTxMut + DbTx> DatabaseProvider<TX> {

let mut receipts = Vec::new();
// loop break if we are at the end of the blocks.
- for (_, block_body) in block_bodies.into_iter() {
+ for (_, block_body) in block_bodies {
let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize);
for _ in block_body.tx_num_range() {
if let Some((_, receipt)) = receipt_iter.next() {
6 changes: 3 additions & 3 deletions crates/storage/provider/src/test_utils/mock.rs
@@ -98,7 +98,7 @@ impl MockEthProvider {

/// Add multiple blocks to local block store
pub fn extend_blocks(&self, iter: impl IntoIterator<Item = (B256, Block)>) {
- for (hash, block) in iter.into_iter() {
+ for (hash, block) in iter {
self.add_header(hash, block.header.clone());
self.add_block(hash, block)
}
@@ -111,7 +111,7 @@

/// Add multiple headers to local header store
pub fn extend_headers(&self, iter: impl IntoIterator<Item = (B256, Header)>) {
- for (hash, header) in iter.into_iter() {
+ for (hash, header) in iter {
self.add_header(hash, header)
}
}
@@ -123,7 +123,7 @@

/// Add account to local account store
pub fn extend_accounts(&self, iter: impl IntoIterator<Item = (Address, ExtendedAccount)>) {
- for (address, account) in iter.into_iter() {
+ for (address, account) in iter {
self.add_account(address, account)
}
}
4 changes: 2 additions & 2 deletions crates/transaction-pool/src/pool/mod.rs
@@ -851,7 +851,7 @@ impl PendingTransactionHashListener {
///
/// Returns false if the channel is closed (receiver dropped)
fn send_all(&self, hashes: impl IntoIterator<Item = TxHash>) -> bool {
- for tx_hash in hashes.into_iter() {
+ for tx_hash in hashes {
match self.sender.try_send(tx_hash) {
Ok(()) => {}
Err(err) => {
@@ -892,7 +892,7 @@ impl<T: PoolTransaction> TransactionListener<T> {
///
/// Returns false if the channel is closed (receiver dropped)
fn send_all(&self, events: impl IntoIterator<Item = NewTransactionEvent<T>>) -> bool {
- for event in events.into_iter() {
+ for event in events {
match self.sender.try_send(event) {
Ok(()) => {}
Err(err) => {
2 changes: 1 addition & 1 deletion crates/trie/trie/benches/hash_post_state.rs
@@ -67,7 +67,7 @@ fn generate_test_data(size: usize) -> HashMap<Address, BundleAccount> {

let mut bundle_builder = BundleBuilder::default();

- for (address, storage) in state.into_iter() {
+ for (address, storage) in state {
bundle_builder = bundle_builder.state_storage(address, storage);
}

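One closing note: the suggestion attached to this lint is machine-applicable, so the bulk of a sweep like this can typically be generated with `cargo clippy --fix` once the lint is set to `warn`, with the results reviewed by hand.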
