btrfs: remove num_entries atomic counter from delayed ref root

The atomic counter 'num_entries' is not used anymore: we increment it
and decrement it, but we never read it to drive any logic. Its last use
was removed by commit 61a56a992f ("btrfs: delayed refs pre-flushing
should only run the heads we have"). So remove it.

Reviewed-by: Boris Burkov <boris@bur.io>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Author: Filipe Manana, 2024-10-23 14:14:11 +01:00 (committed by David Sterba)
parent 055903c4e7
commit f7d4b4924d
4 changed files with 0 additions and 11 deletions
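
To illustrate the pattern the commit message describes, here is a minimal
user-space C sketch (hypothetical names, not the btrfs code) of a write-only
atomic counter: it is incremented and decremented on the relevant paths but
never read, so deleting the field and both atomic operations cannot change
behavior.

/*
 * Minimal sketch, not btrfs code: 'ref_root' and its helpers are made-up
 * names. The counter below is only ever written, never read, which is
 * exactly the situation that makes it safe to delete.
 */
#include <stdatomic.h>

struct ref_root {
	atomic_int num_entries;		/* written on both paths, read nowhere */
};

static void add_ref(struct ref_root *root)
{
	atomic_fetch_add(&root->num_entries, 1);
}

static void drop_ref(struct ref_root *root)
{
	atomic_fetch_sub(&root->num_entries, 1);
}

int main(void)
{
	struct ref_root root = { .num_entries = 0 };

	add_ref(&root);
	drop_ref(&root);
	/* Nothing ever reads root.num_entries, so the field can go away. */
	return 0;
}

The same reasoning applies to delayed_refs->num_entries in the hunks below:
every atomic_inc()/atomic_dec()/atomic_set() on it is a write, and no reader
remained after commit 61a56a992f.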

fs/btrfs/delayed-ref.c

@@ -463,7 +463,6 @@ static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
 	if (!list_empty(&ref->add_list))
 		list_del(&ref->add_list);
 	btrfs_put_delayed_ref(ref);
-	atomic_dec(&delayed_refs->num_entries);
 	btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
 }
@@ -604,7 +603,6 @@ void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
 	RB_CLEAR_NODE(&head->href_node);
-	atomic_dec(&delayed_refs->num_entries);
 	delayed_refs->num_heads--;
 	if (!head->processing)
 		delayed_refs->num_heads_ready--;
@@ -630,7 +628,6 @@ static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
 	if (!exist) {
 		if (ref->action == BTRFS_ADD_DELAYED_REF)
 			list_add_tail(&ref->add_list, &href->ref_add_list);
-		atomic_inc(&root->num_entries);
 		spin_unlock(&href->lock);
 		trans->delayed_ref_updates++;
 		return false;
@@ -901,7 +898,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 		}
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
-		atomic_inc(&delayed_refs->num_entries);
 	}
 	if (qrecord_inserted_ret)
 		*qrecord_inserted_ret = qrecord_inserted;

fs/btrfs/delayed-ref.h

@@ -216,11 +216,6 @@ struct btrfs_delayed_ref_root {
 	/* this spin lock protects the rbtree and the entries inside */
 	spinlock_t lock;
-	/* how many delayed ref updates we've queued, used by the
-	 * throttling code
-	 */
-	atomic_t num_entries;
 	/* total number of head nodes in tree */
 	unsigned long num_heads;

fs/btrfs/extent-tree.c

@@ -2029,7 +2029,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
 		default:
 			WARN_ON(1);
 		}
-		atomic_dec(&delayed_refs->num_entries);
 		/*
 		 * Record the must_insert_reserved flag before we drop the

fs/btrfs/transaction.c

@@ -351,7 +351,6 @@ loop:
 	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
 	xa_init(&cur_trans->delayed_refs.dirty_extents);
-	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 	/*
 	 * although the tree mod log is per file system and not per transaction,