btrfs: qgroup: remove ASYNC_COMMIT mechanism in favor of reserve retry-after-EDQUOT
Commit a514d63882 ("btrfs: qgroup: Commit transaction in advance to
reduce early EDQUOT") tried to reduce early EDQUOT problems by checking
the free qgroup space against a threshold and waking the commit kthread
to free some space.

The problem with that mechanism is that it can only free per-trans
metadata qgroup space; it can do nothing about data or prealloc qgroup
reservations.

Now that we can flush qgroup space and retry the reservation after
EDQUOT, that mechanism can be replaced completely. Remove it in favor
of retry-after-EDQUOT.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent c53e965360
commit adca4d945c
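
For readers who want the shape of the retry-after-EDQUOT flow the changelog
relies on, here is a minimal, self-contained sketch of the pattern. The names
and the toy quota bookkeeping (try_reserve, flush_qgroup_space,
reserve_with_retry) are illustrative assumptions, not the btrfs functions:

/*
 * Minimal sketch of retry-after-EDQUOT.  Everything here is illustrative:
 * the toy "quota" below is not btrfs code.
 */
#include <errno.h>
#include <stdio.h>

static unsigned long long quota_limit = 64;	/* toy limit, in "bytes" */
static unsigned long long quota_used  = 60;	/* already reserved */
static unsigned long long quota_flushable = 30;	/* space a flush could free */

/* Toy stand-in for the reservation check: succeed or return -EDQUOT. */
static int try_reserve(unsigned long long num_bytes)
{
	if (quota_used + num_bytes > quota_limit)
		return -EDQUOT;
	quota_used += num_bytes;
	return 0;
}

/* Toy stand-in for flushing data/prealloc/per-trans metadata reservations. */
static void flush_qgroup_space(void)
{
	quota_used -= quota_flushable;
	quota_flushable = 0;
}

/*
 * The pattern: fail fast with -EDQUOT, flush whatever can release qgroup
 * space, retry once, and only then report -EDQUOT to the caller.
 */
static int reserve_with_retry(unsigned long long num_bytes)
{
	int ret = try_reserve(num_bytes);

	if (ret != -EDQUOT)
		return ret;

	flush_qgroup_space();
	return try_reserve(num_bytes);
}

int main(void)
{
	printf("reserve 16: %d\n", reserve_with_retry(16));	/* succeeds after a flush */
	printf("reserve 64: %d\n", reserve_with_retry(64));	/* still -EDQUOT */
	return 0;
}

Because a failed reservation now triggers the flush on demand, there is no
longer any need to guess ahead of time (via BTRFS_FS_NEED_ASYNC_COMMIT) that
an early commit might help, which is what the hunks below remove.
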
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -545,11 +545,6 @@ enum {
 	 * (device replace, resize, device add/delete, balance)
 	 */
 	BTRFS_FS_EXCL_OP,
-	/*
-	 * To info transaction_kthread we need an immediate commit so it
-	 * doesn't need to wait for commit_interval
-	 */
-	BTRFS_FS_NEED_ASYNC_COMMIT,
 	/*
 	 * Indicate that balance has been set up from the ioctl and is in the
 	 * main phase. The fs_info::balance_ctl is initialized.
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1810,7 +1810,6 @@ static int transaction_kthread(void *arg)
 
 		now = ktime_get_seconds();
 		if (cur->state < TRANS_STATE_COMMIT_START &&
-		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
 		    (now < cur->start_time ||
 		     now - cur->start_time < fs_info->commit_interval)) {
 			spin_unlock(&fs_info->trans_lock);
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -11,7 +11,6 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/btrfs.h>
-#include <linux/sizes.h>
 
 #include "ctree.h"
 #include "transaction.h"
@@ -2895,20 +2894,8 @@ out:
 	return ret;
 }
 
-/*
- * Two limits to commit transaction in advance.
- *
- * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
- * For SIZE, it will be in byte unit as threshold.
- */
-#define QGROUP_FREE_RATIO		32
-#define QGROUP_FREE_SIZE		SZ_32M
-static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
-				const struct btrfs_qgroup *qg, u64 num_bytes)
+static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
 {
-	u64 free;
-	u64 threshold;
-
 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
 		return false;
@@ -2917,32 +2904,6 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
 		return false;
 
-	/*
-	 * Even if we passed the check, it's better to check if reservation
-	 * for meta_pertrans is pushing us near limit.
-	 * If there is too much pertrans reservation or it's near the limit,
-	 * let's try commit transaction to free some, using transaction_kthread
-	 */
-	if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
-			      BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
-		if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
-			free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
-			threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
-					  QGROUP_FREE_SIZE);
-		} else {
-			free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
-			threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
-					  QGROUP_FREE_SIZE);
-		}
-
-		/*
-		 * Use transaction_kthread to commit transaction, so we no
-		 * longer need to bother nested transaction nor lock context.
-		 */
-		if (free < threshold)
-			btrfs_commit_transaction_locksafe(fs_info);
-	}
-
 	return true;
 }
 
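As an aside on the hunk above, a small worked example of the heuristic being
deleted (a toy program; early_commit_threshold is a hypothetical name, while
QGROUP_FREE_RATIO and QGROUP_FREE_SIZE are the constants removed above): with
max_excl = 1 GiB the threshold is min(1 GiB / 32, 32 MiB) = 32 MiB, so the
commit kthread used to be woken once less than 32 MiB of quota remained.

#include <stdio.h>

#define QGROUP_FREE_RATIO	32
#define QGROUP_FREE_SIZE	(32ULL << 20)	/* SZ_32M */

/* Reproduces the removed min_t(u64, max / RATIO, SIZE) computation. */
static unsigned long long early_commit_threshold(unsigned long long max_bytes)
{
	unsigned long long ratio_part = max_bytes / QGROUP_FREE_RATIO;

	return ratio_part < QGROUP_FREE_SIZE ? ratio_part : QGROUP_FREE_SIZE;
}

int main(void)
{
	/* 1 GiB limit -> 32 MiB threshold; 512 MiB limit -> 16 MiB threshold. */
	printf("%llu\n", early_commit_threshold(1ULL << 30));	/* 33554432 */
	printf("%llu\n", early_commit_threshold(512ULL << 20));	/* 16777216 */
	return 0;
}
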
@@ -2990,7 +2951,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
 
 		qg = unode_aux_to_qgroup(unode);
 
-		if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
+		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
 			ret = -EDQUOT;
 			goto out;
 		}
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2351,7 +2351,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 	 */
 	cur_trans->state = TRANS_STATE_COMPLETED;
 	wake_up(&cur_trans->commit_wait);
-	clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
 
 	spin_lock(&fs_info->trans_lock);
 	list_del_init(&cur_trans->list);
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -210,20 +210,6 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 				   int wait_for_unblock);
-
-/*
- * Try to commit transaction asynchronously, so this is safe to call
- * even holding a spinlock.
- *
- * It's done by informing transaction_kthread to commit transaction without
- * waiting for commit interval.
- */
-static inline void btrfs_commit_transaction_locksafe(
-		struct btrfs_fs_info *fs_info)
-{
-	set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
-	wake_up_process(fs_info->transaction_kthread);
-}
 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
 void btrfs_throttle(struct btrfs_fs_info *fs_info);