xfs: move log discard work to xfs_discard.c

Because we are going to use the same list-based discard submission
interface for fstrim-based discards, too.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Author:    Dave Chinner, 2023-10-04 09:24:02 +11:00
Committer: Dave Chinner
Parent:    8a749fd1a8
Commit:    428c4435b0
5 changed files with 113 additions and 88 deletions
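
The "list-based discard submission interface" referred to in the commit message is the pair introduced by this patch: struct xfs_busy_extents and xfs_discard_extents(). A rough sketch of how a future fstrim-style caller might drive it is shown below; this is hypothetical and not part of this commit, xfs_trim_discard_sketch() is an invented name, and the actual fstrim conversion is left to a later patch. It uses the "owner points at itself" pattern documented in the new struct comment, so that discard completion frees the standalone object.

/*
 * Hypothetical sketch only, not part of this commit. A standalone user of
 * the list based interface (e.g. a future fstrim path) allocates its own
 * xfs_busy_extents and points ->owner at the object itself, so that
 * xfs_discard_endio_work() frees it once the discards complete.
 */
static int
xfs_trim_discard_sketch(
	struct xfs_mount	*mp)
{
	struct xfs_busy_extents	*extents;

	extents = kmem_zalloc(sizeof(*extents), KM_NOFS);
	extents->mount = mp;
	extents->owner = extents;	/* free only this object on completion */
	INIT_LIST_HEAD(&extents->extent_list);

	/*
	 * ... gather the busy extents to discard here, linking each
	 * struct xfs_extent_busy onto extents->extent_list ...
	 */

	return xfs_discard_extents(mp, extents);
}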

fs/xfs/xfs_discard.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (C) 2010, 2023 Red Hat, Inc.
  * All Rights Reserved.
  */
 #include "xfs.h"
@@ -19,6 +19,81 @@
 #include "xfs_log.h"
 #include "xfs_ag.h"
 
+struct workqueue_struct *xfs_discard_wq;
+
+static void
+xfs_discard_endio_work(
+	struct work_struct *work)
+{
+	struct xfs_busy_extents	*extents =
+		container_of(work, struct xfs_busy_extents, endio_work);
+
+	xfs_extent_busy_clear(extents->mount, &extents->extent_list, false);
+	kmem_free(extents->owner);
+}
+
+/*
+ * Queue up the actual completion to a thread to avoid IRQ-safe locking for
+ * pagb_lock.
+ */
+static void
+xfs_discard_endio(
+	struct bio *bio)
+{
+	struct xfs_busy_extents	*extents = bio->bi_private;
+
+	INIT_WORK(&extents->endio_work, xfs_discard_endio_work);
+	queue_work(xfs_discard_wq, &extents->endio_work);
+	bio_put(bio);
+}
+
+/*
+ * Walk the discard list and issue discards on all the busy extents in the
+ * list. We plug and chain the bios so that we only need a single completion
+ * call to clear all the busy extents once the discards are complete.
+ */
+int
+xfs_discard_extents(
+	struct xfs_mount	*mp,
+	struct xfs_busy_extents	*extents)
+{
+	struct xfs_extent_busy	*busyp;
+	struct bio		*bio = NULL;
+	struct blk_plug		plug;
+	int			error = 0;
+
+	blk_start_plug(&plug);
+	list_for_each_entry(busyp, &extents->extent_list, list) {
+		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
+				busyp->length);
+
+		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
+				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
+				XFS_FSB_TO_BB(mp, busyp->length),
+				GFP_NOFS, &bio);
+		if (error && error != -EOPNOTSUPP) {
+			xfs_info(mp,
+	 "discard failed for extent [0x%llx,%u], error %d",
+				(unsigned long long)busyp->bno,
+				busyp->length,
+				error);
+			break;
+		}
+	}
+
+	if (bio) {
+		bio->bi_private = extents;
+		bio->bi_end_io = xfs_discard_endio;
+		submit_bio(bio);
+	} else {
+		xfs_discard_endio_work(&extents->endio_work);
+	}
+	blk_finish_plug(&plug);
+
+	return error;
+}
+
 STATIC int
 xfs_trim_extents(
 	struct xfs_perag	*pag,

fs/xfs/xfs_discard.h

@@ -3,8 +3,10 @@
 #define XFS_DISCARD_H 1
 
 struct fstrim_range;
-struct list_head;
+struct xfs_mount;
+struct xfs_busy_extents;
 
-extern int	xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *);
+int xfs_discard_extents(struct xfs_mount *mp, struct xfs_busy_extents *busy);
+int xfs_ioc_trim(struct xfs_mount *mp, struct fstrim_range __user *fstrim);
 
 #endif	/* XFS_DISCARD_H */

fs/xfs/xfs_extent_busy.h

@@ -16,9 +16,6 @@ struct xfs_alloc_arg;
 /*
  * Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that
  * have been freed but whose transactions aren't committed to disk yet.
- *
- * Note that we use the transaction ID to record the transaction, not the
- * transaction structure itself. See xfs_extent_busy_insert() for details.
  */
 struct xfs_extent_busy {
 	struct rb_node	rb_node;	/* ag by-bno indexed search tree */
@@ -31,6 +28,23 @@ struct xfs_extent_busy {
 #define XFS_EXTENT_BUSY_SKIP_DISCARD	0x02	/* do not discard */
 };
 
+/*
+ * List used to track groups of related busy extents all the way through
+ * to discard completion.
+ */
+struct xfs_busy_extents {
+	struct xfs_mount	*mount;
+	struct list_head	extent_list;
+	struct work_struct	endio_work;
+
+	/*
+	 * Owner is the object containing the struct xfs_busy_extents to free
+	 * once the busy extents have been processed. If only the
+	 * xfs_busy_extents object needs freeing, then point this at itself.
+	 */
+	void			*owner;
+};
+
 void
 xfs_extent_busy_insert(struct xfs_trans *tp, struct xfs_perag *pag,
 	xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
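
The owner field above supports two usage patterns: a standalone xfs_busy_extents that frees itself (as sketched near the top of this page), and one embedded in a larger object that is freed as a whole when discard completion runs. The snippet below is an illustration of the embedded case only; hypothetical_container is an invented stand-in for the real user in this patch, struct xfs_cil_ctx.

/*
 * Illustration only: embedding xfs_busy_extents in a larger object and
 * letting discard completion free the whole container via ->owner.
 */
struct hypothetical_container {
	struct xfs_busy_extents	busy_extents;
	/* ... other state freed together with the busy extent list ... */
};

static void
hypothetical_submit_discards(
	struct xfs_mount		*mp,
	struct hypothetical_container	*c)
{
	if (list_empty(&c->busy_extents.extent_list)) {
		kmem_free(c);		/* nothing to discard, free now */
		return;
	}

	c->busy_extents.mount = mp;
	c->busy_extents.owner = c;	/* free the container, not just the list */
	xfs_discard_extents(mp, &c->busy_extents);
}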

fs/xfs/xfs_log_cil.c

@@ -16,8 +16,7 @@
 #include "xfs_log.h"
 #include "xfs_log_priv.h"
 #include "xfs_trace.h"
-
-struct workqueue_struct *xfs_discard_wq;
+#include "xfs_discard.h"
 
 /*
  * Allocate a new ticket. Failing to get a new ticket makes it really hard to
@@ -103,7 +102,7 @@ xlog_cil_ctx_alloc(void)
 	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
 	INIT_LIST_HEAD(&ctx->committing);
-	INIT_LIST_HEAD(&ctx->busy_extents);
+	INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
 	INIT_LIST_HEAD(&ctx->log_items);
 	INIT_LIST_HEAD(&ctx->lv_chain);
 	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
@@ -132,7 +131,7 @@ xlog_cil_push_pcp_aggregate(
 	if (!list_empty(&cilpcp->busy_extents)) {
 		list_splice_init(&cilpcp->busy_extents,
-				&ctx->busy_extents);
+				&ctx->busy_extents.extent_list);
 	}
 	if (!list_empty(&cilpcp->log_items))
 		list_splice_init(&cilpcp->log_items, &ctx->log_items);
@@ -708,76 +707,6 @@ xlog_cil_free_logvec(
 	}
 }
 
-static void
-xlog_discard_endio_work(
-	struct work_struct *work)
-{
-	struct xfs_cil_ctx	*ctx =
-		container_of(work, struct xfs_cil_ctx, discard_endio_work);
-	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
-
-	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
-	kmem_free(ctx);
-}
-
-/*
- * Queue up the actual completion to a thread to avoid IRQ-safe locking for
- * pagb_lock. Note that we need a unbounded workqueue, otherwise we might
- * get the execution delayed up to 30 seconds for weird reasons.
- */
-static void
-xlog_discard_endio(
-	struct bio *bio)
-{
-	struct xfs_cil_ctx	*ctx = bio->bi_private;
-
-	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
-	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
-	bio_put(bio);
-}
-
-static void
-xlog_discard_busy_extents(
-	struct xfs_mount	*mp,
-	struct xfs_cil_ctx	*ctx)
-{
-	struct list_head	*list = &ctx->busy_extents;
-	struct xfs_extent_busy	*busyp;
-	struct bio		*bio = NULL;
-	struct blk_plug		plug;
-	int			error = 0;
-
-	ASSERT(xfs_has_discard(mp));
-
-	blk_start_plug(&plug);
-	list_for_each_entry(busyp, list, list) {
-		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
-				busyp->length);
-
-		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
-				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
-				XFS_FSB_TO_BB(mp, busyp->length),
-				GFP_NOFS, &bio);
-		if (error && error != -EOPNOTSUPP) {
-			xfs_info(mp,
-	 "discard failed for extent [0x%llx,%u], error %d",
-				(unsigned long long)busyp->bno,
-				busyp->length,
-				error);
-			break;
-		}
-	}
-
-	if (bio) {
-		bio->bi_private = ctx;
-		bio->bi_end_io = xlog_discard_endio;
-		submit_bio(bio);
-	} else {
-		xlog_discard_endio_work(&ctx->discard_endio_work);
-	}
-	blk_finish_plug(&plug);
-}
-
 /*
  * Mark all items committed and clear busy extents. We free the log vector
  * chains in a separate pass so that we unpin the log items as quickly as
@@ -807,8 +736,8 @@ xlog_cil_committed(
 	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
 					ctx->start_lsn, abort);
 
-	xfs_extent_busy_sort(&ctx->busy_extents);
-	xfs_extent_busy_clear(mp, &ctx->busy_extents,
+	xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
+	xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
 			      xfs_has_discard(mp) && !abort);
 
 	spin_lock(&ctx->cil->xc_push_lock);
@@ -817,10 +746,14 @@ xlog_cil_committed(
 	xlog_cil_free_logvec(&ctx->lv_chain);
 
-	if (!list_empty(&ctx->busy_extents))
-		xlog_discard_busy_extents(mp, ctx);
-	else
-		kmem_free(ctx);
+	if (!list_empty(&ctx->busy_extents.extent_list)) {
+		ctx->busy_extents.mount = mp;
+		ctx->busy_extents.owner = ctx;
+		xfs_discard_extents(mp, &ctx->busy_extents);
+		return;
+	}
+
+	kmem_free(ctx);
 }
 
 void

fs/xfs/xfs_log_priv.h

@@ -6,6 +6,8 @@
 #ifndef __XFS_LOG_PRIV_H__
 #define __XFS_LOG_PRIV_H__
 
+#include "xfs_extent_busy.h"	/* for struct xfs_busy_extents */
+
 struct xfs_buf;
 struct xlog;
 struct xlog_ticket;
@@ -223,12 +225,11 @@ struct xfs_cil_ctx {
 	struct xlog_in_core	*commit_iclog;
 	struct xlog_ticket	*ticket;	/* chkpt ticket */
 	atomic_t		space_used;	/* aggregate size of regions */
-	struct list_head	busy_extents;	/* busy extents in chkpt */
+	struct xfs_busy_extents	busy_extents;
 	struct list_head	log_items;	/* log items in chkpt */
 	struct list_head	lv_chain;	/* logvecs being pushed */
 	struct list_head	iclog_entry;
 	struct list_head	committing;	/* ctx committing list */
-	struct work_struct	discard_endio_work;
 	struct work_struct	push_work;
 	atomic_t		order_id;