// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_error.h"

#include <linux/iversion.h>

kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */

static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_inode_log_item, ili_item);
}
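
/*
 * Work out how many log vectors and how much log space the inode data fork
 * will need in the worst case, and add them to *nvecs and *nbytes.
 */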
STATIC void
xfs_inode_item_data_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			/* worst case, doesn't subtract delalloc extents */
			*nbytes += XFS_IFORK_DSIZE(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			*nbytes += ip->i_df.if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			*nbytes += roundup(ip->i_df.if_bytes, 4);
			*nvecs += 1;
		}
		break;

	case XFS_DINODE_FMT_DEV:
		break;
	default:
		ASSERT(0);
		break;
	}
}
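
/*
 * As above, but for the attribute fork: add its worst-case log vector count
 * and space requirement to *nvecs and *nbytes.
 */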
STATIC void
xfs_inode_item_attr_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			/* worst case, doesn't subtract unused space */
			*nbytes += XFS_IFORK_ASIZE(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			*nbytes += ip->i_afp->if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			*nbytes += roundup(ip->i_afp->if_bytes, 4);
			*nvecs += 1;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	*nvecs += 2;
	*nbytes += sizeof(struct xfs_inode_log_format) +
		   xfs_log_dinode_size(ip->i_mount);

	xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
	if (XFS_IFORK_Q(ip))
		xfs_inode_item_attr_fork_size(iip, nvecs, nbytes);
}
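
/*
 * Format the data fork into the log iovec chain. Only regions that are dirty
 * and valid for the current fork format are copied; stale logging flags are
 * cleared from ili_fields and the inode log format descriptor is updated to
 * reflect what was actually logged.
 */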
STATIC void
xfs_inode_item_format_data_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(&ip->i_df) > 0);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ASSERT(data_bytes <= ip->i_df.if_bytes);

			ilf->ilf_dsize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			ASSERT(ip->i_df.if_broot != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IBROOT,
					ip->i_df.if_broot,
					ip->i_df.if_broot_bytes);
			ilf->ilf_dsize = ip->i_df.if_broot_bytes;
			ilf->ilf_size++;
		} else {
			ASSERT(!(iip->ili_fields &
				 XFS_ILOG_DBROOT));
			iip->ili_fields &= ~XFS_ILOG_DBROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_df.if_bytes, 4);
			ASSERT(ip->i_df.if_u1.if_data != NULL);
			ASSERT(ip->i_d.di_size > 0);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
					ip->i_df.if_u1.if_data, data_bytes);
			ilf->ilf_dsize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DDATA;
		}
		break;
	case XFS_DINODE_FMT_DEV:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT);
		if (iip->ili_fields & XFS_ILOG_DEV)
			ilf->ilf_u.ilfu_rdev = sysv_encode_dev(VFS_I(ip)->i_rdev);
		break;
	default:
		ASSERT(0);
		break;
	}
}
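
/*
 * Format the attribute fork into the log iovec chain, mirroring the data
 * fork handling above.
 */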
STATIC void
xfs_inode_item_format_attr_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(ip->i_afp) ==
				ip->i_afp->if_nextents);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ilf->ilf_asize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_AEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			ASSERT(ip->i_afp->if_broot != NULL);

			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT,
					ip->i_afp->if_broot,
					ip->i_afp->if_broot_bytes);
			ilf->ilf_asize = ip->i_afp->if_broot_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ABROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_afp->if_bytes, 4);
			ASSERT(ip->i_afp->if_u1.if_data != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
					ip->i_afp->if_u1.if_data,
					data_bytes);
			ilf->ilf_asize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ADATA;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Convert an incore timestamp to a log timestamp.  Note that the log format
 * specifies host endian format!
 */
static inline xfs_ictimestamp_t
xfs_inode_to_log_dinode_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_legacy_ictimestamp	*lits;
	xfs_ictimestamp_t		its;

	if (xfs_inode_has_bigtime(ip))
		return xfs_inode_encode_bigtime(tv);

	lits = (struct xfs_legacy_ictimestamp *)&its;
	lits->t_sec = tv.tv_sec;
	lits->t_nsec = tv.tv_nsec;

	return its;
}
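
/*
 * Fill in the log format dinode from the incore inode, pulling the current
 * timestamps, ownership and link count from the VFS inode.
 */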
static void
xfs_inode_to_log_dinode(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = XFS_DINODE_MAGIC;
	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = i_uid_read(inode);
	to->di_gid = i_gid_read(inode);
	to->di_projid_lo = from->di_projid & 0xffff;
	to->di_projid_hi = from->di_projid >> 16;

	memset(to->di_pad, 0, sizeof(to->di_pad));
	memset(to->di_pad3, 0, sizeof(to->di_pad3));
	to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode->i_ctime);
	to->di_nlink = inode->i_nlink;
	to->di_gen = inode->i_generation;
	to->di_mode = inode->i_mode;

	to->di_size = from->di_size;
	to->di_nblocks = from->di_nblocks;
	to->di_extsize = from->di_extsize;
	to->di_nextents = xfs_ifork_nextents(&ip->i_df);
	to->di_anextents = xfs_ifork_nextents(ip->i_afp);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_dmevmask = from->di_dmevmask;
	to->di_dmstate = from->di_dmstate;
	to->di_flags = from->di_flags;

	/* log a dummy value to ensure log structure is fully initialised */
	to->di_next_unlinked = NULLAGINO;

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		to->di_version = 3;
		to->di_changecount = inode_peek_iversion(inode);
		to->di_crtime = xfs_inode_to_log_dinode_ts(ip, from->di_crtime);
		to->di_flags2 = from->di_flags2;
		to->di_cowextsize = from->di_cowextsize;
		to->di_ino = ip->i_ino;
		to->di_lsn = lsn;
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = from->di_flushiter;
	}
}

/*
 * Format the inode core. Current timestamp data is only in the VFS inode
 * fields, so we need to grab them from there. Hence rather than just copying
 * the XFS inode core structure, format the fields directly into the iovec.
 */
static void
xfs_inode_item_format_core(
	struct xfs_inode	*ip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_log_dinode	*dic;

	dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
	xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
	xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
}

/*
 * This is called to fill in the vector of log iovecs for the given inode
 * log item.  It fills the first item with an inode log format structure,
 * the second with the on-disk inode structure, and a possible third and/or
 * fourth with the inode data/extents/b-tree root and inode attributes
 * data/extents/b-tree root.
 *
 * Note: Always use the 64 bit inode log format structure so we don't
 * leave an uninitialised hole in the format item on 64 bit systems. Log
 * recovery on 32 bit systems handles this just fine, so there's no reason
 * for not using and initialising the properly padded structure all the time.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_inode_log_format *ilf;

	ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
	ilf->ilf_type = XFS_LI_INODE;
	ilf->ilf_ino = ip->i_ino;
	ilf->ilf_blkno = ip->i_imap.im_blkno;
	ilf->ilf_len = ip->i_imap.im_len;
	ilf->ilf_boffset = ip->i_imap.im_boffset;
	ilf->ilf_fields = XFS_ILOG_CORE;
	ilf->ilf_size = 2; /* format + core */

	/*
	 * make sure we don't leak uninitialised data into the log in the case
	 * when we don't log every field in the inode.
	 */
	ilf->ilf_dsize = 0;
	ilf->ilf_asize = 0;
	ilf->ilf_pad = 0;
	memset(&ilf->ilf_u, 0, sizeof(ilf->ilf_u));

	xlog_finish_iovec(lv, vecp, sizeof(*ilf));

	xfs_inode_item_format_core(ip, lv, &vecp);
	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
	if (XFS_IFORK_Q(ip)) {
		xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
	} else {
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
	}

	/* update the format with the exact fields we actually logged */
	ilf->ilf_fields |= (iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
}

/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(lip->li_buf);

	trace_xfs_inode_pin(ip, _RET_IP_);
	atomic_inc(&ip->i_pincount);
}

/*
 * This is called to unpin the inode associated with the inode log
 * item which was previously pinned with a call to xfs_inode_item_pin().
 *
 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
 *
 * Note that unpin can race with inode cluster buffer freeing, which marks the buffer
 * stale. In that case, flush completions are run from the buffer unpin call,
 * which may happen before the inode is unpinned. If we lose the race, there
 * will be no buffer attached to the log item, but the inode will be marked
 * XFS_ISTALE.
 */
STATIC void
xfs_inode_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	trace_xfs_inode_unpin(ip, _RET_IP_);
	ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE));
	ASSERT(atomic_read(&ip->i_pincount) > 0);
	if (atomic_dec_and_test(&ip->i_pincount))
		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}
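
/*
 * The AIL calls this to write the inode back. We do not write the inode
 * directly; instead we flush it to the backing cluster buffer and queue that
 * buffer for delwri submission, and tell the caller whether the item is
 * pinned, already flushing, locked or successfully queued.
 */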
STATIC uint
xfs_inode_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp = lip->li_buf;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	ASSERT(iip->ili_item.li_buf);
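
	/*
	 * Checks we can make without holding the buffer lock: skip inodes
	 * that are pinned in the log, marked stale or already being flushed.
	 */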
|
|
|
|
|
|
|
|
if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) ||
|
|
|
|
(ip->i_flags & XFS_ISTALE))
|
2005-04-16 22:20:36 +00:00
|
|
|
return XFS_ITEM_PINNED;
|
|
|
|
|
2020-08-17 23:41:01 +00:00
|
|
|
if (xfs_iflags_test(ip, XFS_IFLUSHING))
|
2020-06-29 21:49:19 +00:00
|
|
|
return XFS_ITEM_FLUSHING;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-06-29 21:49:19 +00:00
|
|
|
if (!xfs_buf_trylock(bp))
|
|
|
|
return XFS_ITEM_LOCKED;
|
2012-04-23 05:58:36 +00:00
|
|
|
|
2020-06-29 21:49:19 +00:00
|
|
|
spin_unlock(&lip->li_ailp->ail_lock);
|
2012-06-11 14:39:43 +00:00
|
|
|
|
xfs: on-stack delayed write buffer lists
Queue delwri buffers on a local on-stack list instead of a per-buftarg one,
and write back the buffers per-process instead of by waking up xfsbufd.
This is now easily doable given that we have very few places left that write
delwri buffers:
- log recovery:
Only done at mount time, and already forcing out the buffers
synchronously using xfs_flush_buftarg
- quotacheck:
Same story.
- dquot reclaim:
Writes out dirty dquots on the LRU under memory pressure. We might
want to look into doing more of this via xfsaild, but it's already
more optimal than the synchronous inode reclaim that writes each
buffer synchronously.
- xfsaild:
This is the main beneficiary of the change. By keeping a local list
of buffers to write we reduce latency of writing out buffers, and
more importably we can remove all the delwri list promotions which
were hitting the buffer cache hard under sustained metadata loads.
The implementation is very straight forward - xfs_buf_delwri_queue now gets
a new list_head pointer that it adds the delwri buffers to, and all callers
need to eventually submit the list using xfs_buf_delwi_submit or
xfs_buf_delwi_submit_nowait. Buffers that already are on a delwri list are
skipped in xfs_buf_delwri_queue, assuming they already are on another delwri
list. The biggest change to pass down the buffer list was done to the AIL
pushing. Now that we operate on buffers the trylock, push and pushbuf log
item methods are merged into a single push routine, which tries to lock the
item, and if possible add the buffer that needs writeback to the buffer list.
This leads to much simpler code than the previous split but requires the
individual IOP_PUSH instances to unlock and reacquire the AIL around calls
to blocking routines.
Given that xfsailds now also handle writing out buffers, the conditions for
log forcing and the sleep times needed some small changes. The most
important one is that we consider an AIL busy as long we still have buffers
to push, and the other one is that we do increment the pushed LSN for
buffers that are under flushing at this moment, but still count them towards
the stuck items for restart purposes. Without this we could hammer on stuck
items without ever forcing the log and not make progress under heavy random
delete workloads on fast flash storage devices.
[ Dave Chinner:
- rebase on previous patches.
- improved comments for XBF_DELWRI_Q handling
- fix XBF_ASYNC handling in queue submission (test 106 failure)
- rename delwri submit function buffer list parameters for clarity
- xfs_efd_item_push() should return XFS_ITEM_PINNED ]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2012-04-23 05:58:39 +00:00
|
|
|
/*
|
2020-06-29 21:49:19 +00:00
|
|
|
* We need to hold a reference for flushing the cluster buffer as it may
|
|
|
|
* fail the buffer without IO submission. In which case, we better get a
|
|
|
|
* reference for that completion because otherwise we don't get a
|
|
|
|
* reference for IO until we queue the buffer for delwri submission.
|
xfs: on-stack delayed write buffer lists
Queue delwri buffers on a local on-stack list instead of a per-buftarg one,
and write back the buffers per-process instead of by waking up xfsbufd.
This is now easily doable given that we have very few places left that write
delwri buffers:
- log recovery:
Only done at mount time, and already forcing out the buffers
synchronously using xfs_flush_buftarg
- quotacheck:
Same story.
- dquot reclaim:
Writes out dirty dquots on the LRU under memory pressure. We might
want to look into doing more of this via xfsaild, but it's already
more optimal than the synchronous inode reclaim that writes each
buffer synchronously.
- xfsaild:
This is the main beneficiary of the change. By keeping a local list
of buffers to write we reduce latency of writing out buffers, and
more importably we can remove all the delwri list promotions which
were hitting the buffer cache hard under sustained metadata loads.
The implementation is very straight forward - xfs_buf_delwri_queue now gets
a new list_head pointer that it adds the delwri buffers to, and all callers
need to eventually submit the list using xfs_buf_delwi_submit or
xfs_buf_delwi_submit_nowait. Buffers that already are on a delwri list are
skipped in xfs_buf_delwri_queue, assuming they already are on another delwri
list. The biggest change to pass down the buffer list was done to the AIL
pushing. Now that we operate on buffers the trylock, push and pushbuf log
item methods are merged into a single push routine, which tries to lock the
item, and if possible add the buffer that needs writeback to the buffer list.
This leads to much simpler code than the previous split but requires the
individual IOP_PUSH instances to unlock and reacquire the AIL around calls
to blocking routines.
Given that xfsailds now also handle writing out buffers, the conditions for
log forcing and the sleep times needed some small changes. The most
important one is that we consider an AIL busy as long we still have buffers
to push, and the other one is that we do increment the pushed LSN for
buffers that are under flushing at this moment, but still count them towards
the stuck items for restart purposes. Without this we could hammer on stuck
items without ever forcing the log and not make progress under heavy random
delete workloads on fast flash storage devices.
[ Dave Chinner:
- rebase on previous patches.
- improved comments for XBF_DELWRI_Q handling
- fix XBF_ASYNC handling in queue submission (test 106 failure)
- rename delwri submit function buffer list parameters for clarity
- xfs_efd_item_push() should return XFS_ITEM_PINNED ]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2012-04-23 05:58:39 +00:00
|
|
|
*/
|
2020-06-29 21:49:19 +00:00
|
|
|
xfs_buf_hold(bp);
|
2020-06-29 21:49:20 +00:00
|
|
|
error = xfs_iflush_cluster(bp);
|
xfs: on-stack delayed write buffer lists
Queue delwri buffers on a local on-stack list instead of a per-buftarg one,
and write back the buffers per-process instead of by waking up xfsbufd.
This is now easily doable given that we have very few places left that write
delwri buffers:
- log recovery:
Only done at mount time, and already forcing out the buffers
synchronously using xfs_flush_buftarg
- quotacheck:
Same story.
- dquot reclaim:
Writes out dirty dquots on the LRU under memory pressure. We might
want to look into doing more of this via xfsaild, but it's already
more optimal than the synchronous inode reclaim that writes each
buffer synchronously.
- xfsaild:
This is the main beneficiary of the change. By keeping a local list
of buffers to write we reduce latency of writing out buffers, and
more importably we can remove all the delwri list promotions which
were hitting the buffer cache hard under sustained metadata loads.
The implementation is very straight forward - xfs_buf_delwri_queue now gets
a new list_head pointer that it adds the delwri buffers to, and all callers
need to eventually submit the list using xfs_buf_delwi_submit or
xfs_buf_delwi_submit_nowait. Buffers that already are on a delwri list are
skipped in xfs_buf_delwri_queue, assuming they already are on another delwri
list. The biggest change to pass down the buffer list was done to the AIL
pushing. Now that we operate on buffers the trylock, push and pushbuf log
item methods are merged into a single push routine, which tries to lock the
item, and if possible add the buffer that needs writeback to the buffer list.
This leads to much simpler code than the previous split but requires the
individual IOP_PUSH instances to unlock and reacquire the AIL around calls
to blocking routines.
Given that xfsailds now also handle writing out buffers, the conditions for
log forcing and the sleep times needed some small changes. The most
important one is that we consider an AIL busy as long we still have buffers
to push, and the other one is that we do increment the pushed LSN for
buffers that are under flushing at this moment, but still count them towards
the stuck items for restart purposes. Without this we could hammer on stuck
items without ever forcing the log and not make progress under heavy random
delete workloads on fast flash storage devices.
[ Dave Chinner:
- rebase on previous patches.
- improved comments for XBF_DELWRI_Q handling
- fix XBF_ASYNC handling in queue submission (test 106 failure)
- rename delwri submit function buffer list parameters for clarity
- xfs_efd_item_push() should return XFS_ITEM_PINNED ]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2012-04-23 05:58:39 +00:00
|
|
|
if (!error) {
|
|
|
|
if (!xfs_buf_delwri_queue(bp, buffer_list))
|
|
|
|
rval = XFS_ITEM_FLUSHING;
|
|
|
|
xfs_buf_relse(bp);
|
2020-06-29 21:49:19 +00:00
|
|
|
} else {
|
2020-06-29 21:49:20 +00:00
|
|
|
/*
|
|
|
|
* Release the buffer if we were unable to flush anything. On
|
|
|
|
* any other error, the buffer has already been released.
|
|
|
|
*/
|
|
|
|
if (error == -EAGAIN)
|
|
|
|
xfs_buf_relse(bp);
|
2020-03-27 15:29:55 +00:00
|
|
|
rval = XFS_ITEM_LOCKED;
|
2020-06-29 21:49:19 +00:00
|
|
|
}
|
xfs: on-stack delayed write buffer lists
Queue delwri buffers on a local on-stack list instead of a per-buftarg one,
and write back the buffers per-process instead of by waking up xfsbufd.
This is now easily doable given that we have very few places left that write
delwri buffers:
- log recovery:
Only done at mount time, and already forcing out the buffers
synchronously using xfs_flush_buftarg
- quotacheck:
Same story.
- dquot reclaim:
Writes out dirty dquots on the LRU under memory pressure. We might
want to look into doing more of this via xfsaild, but it's already
more optimal than the synchronous inode reclaim that writes each
buffer synchronously.
- xfsaild:
This is the main beneficiary of the change. By keeping a local list
of buffers to write we reduce latency of writing out buffers, and
more importably we can remove all the delwri list promotions which
were hitting the buffer cache hard under sustained metadata loads.
The implementation is very straight forward - xfs_buf_delwri_queue now gets
a new list_head pointer that it adds the delwri buffers to, and all callers
need to eventually submit the list using xfs_buf_delwi_submit or
xfs_buf_delwi_submit_nowait. Buffers that already are on a delwri list are
skipped in xfs_buf_delwri_queue, assuming they already are on another delwri
list. The biggest change to pass down the buffer list was done to the AIL
pushing. Now that we operate on buffers the trylock, push and pushbuf log
item methods are merged into a single push routine, which tries to lock the
item, and if possible add the buffer that needs writeback to the buffer list.
This leads to much simpler code than the previous split but requires the
individual IOP_PUSH instances to unlock and reacquire the AIL around calls
to blocking routines.
Given that xfsailds now also handle writing out buffers, the conditions for
log forcing and the sleep times needed some small changes. The most
important one is that we consider an AIL busy as long we still have buffers
to push, and the other one is that we do increment the pushed LSN for
buffers that are under flushing at this moment, but still count them towards
the stuck items for restart purposes. Without this we could hammer on stuck
items without ever forcing the log and not make progress under heavy random
delete workloads on fast flash storage devices.
[ Dave Chinner:
- rebase on previous patches.
- improved comments for XBF_DELWRI_Q handling
- fix XBF_ASYNC handling in queue submission (test 106 failure)
- rename delwri submit function buffer list parameters for clarity
- xfs_efd_item_push() should return XFS_ITEM_PINNED ]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2012-04-23 05:58:39 +00:00
|
|
|
|
2018-03-07 22:59:39 +00:00
|
|
|
spin_lock(&lip->li_ailp->ail_lock);
|
xfs: on-stack delayed write buffer lists
Queue delwri buffers on a local on-stack list instead of a per-buftarg one,
and write back the buffers per-process instead of by waking up xfsbufd.
This is now easily doable given that we have very few places left that write
delwri buffers:
- log recovery:
Only done at mount time, and already forcing out the buffers
synchronously using xfs_flush_buftarg
- quotacheck:
Same story.
- dquot reclaim:
Writes out dirty dquots on the LRU under memory pressure. We might
want to look into doing more of this via xfsaild, but it's already
more optimal than the synchronous inode reclaim that writes each
buffer synchronously.
- xfsaild:
This is the main beneficiary of the change. By keeping a local list
of buffers to write we reduce latency of writing out buffers, and
more importably we can remove all the delwri list promotions which
were hitting the buffer cache hard under sustained metadata loads.
The implementation is very straight forward - xfs_buf_delwri_queue now gets
a new list_head pointer that it adds the delwri buffers to, and all callers
need to eventually submit the list using xfs_buf_delwi_submit or
xfs_buf_delwi_submit_nowait. Buffers that already are on a delwri list are
skipped in xfs_buf_delwri_queue, assuming they already are on another delwri
list. The biggest change to pass down the buffer list was done to the AIL
pushing. Now that we operate on buffers the trylock, push and pushbuf log
item methods are merged into a single push routine, which tries to lock the
item, and if possible add the buffer that needs writeback to the buffer list.
This leads to much simpler code than the previous split but requires the
individual IOP_PUSH instances to unlock and reacquire the AIL around calls
to blocking routines.
Given that xfsailds now also handle writing out buffers, the conditions for
log forcing and the sleep times needed some small changes. The most
important one is that we consider an AIL busy as long we still have buffers
to push, and the other one is that we do increment the pushed LSN for
buffers that are under flushing at this moment, but still count them towards
the stuck items for restart purposes. Without this we could hammer on stuck
items without ever forcing the log and not make progress under heavy random
delete workloads on fast flash storage devices.
[ Dave Chinner:
- rebase on previous patches.
- improved comments for XBF_DELWRI_Q handling
- fix XBF_ASYNC handling in queue submission (test 106 failure)
- rename delwri submit function buffer list parameters for clarity
- xfs_efd_item_push() should return XFS_ITEM_PINNED ]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2012-04-23 05:58:39 +00:00
|
|
|
return rval;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unlock the inode associated with the inode log item.
|
|
|
|
*/
|
|
|
|
STATIC void
|
2019-06-29 02:27:32 +00:00
|
|
|
xfs_inode_item_release(
|
2010-06-23 08:11:15 +00:00
|
|
|
struct xfs_log_item *lip)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2010-06-23 08:11:15 +00:00
|
|
|
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
|
|
|
|
struct xfs_inode *ip = iip->ili_inode;
|
2010-06-24 01:36:58 +00:00
|
|
|
unsigned short lock_flags;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-08 12:34:47 +00:00
|
|
|
ASSERT(ip->i_itemp != NULL);
|
|
|
|
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-06-24 01:36:58 +00:00
|
|
|
lock_flags = iip->ili_lock_flags;
|
|
|
|
iip->ili_lock_flags = 0;
|
2011-09-19 15:00:54 +00:00
|
|
|
if (lock_flags)
|
2011-07-08 12:34:47 +00:00
|
|
|
xfs_iunlock(ip, lock_flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-11-30 04:15:46 +00:00
|
|
|
* This is called to find out where the oldest active copy of the inode log
|
|
|
|
* item in the on disk log resides now that the last log write of it completed
|
|
|
|
* at the given lsn. Since we always re-log all dirty data in an inode, the
|
|
|
|
* latest copy in the on disk log is the only one that matters. Therefore,
|
|
|
|
* simply return the given lsn.
|
|
|
|
*
|
|
|
|
* If the inode has been marked stale because the cluster is being freed, we
|
|
|
|
* don't want to (re-)insert this inode into the AIL. There is a race condition
|
|
|
|
* where the cluster buffer may be unpinned before the inode is inserted into
|
|
|
|
* the AIL during transaction committed processing. If the buffer is unpinned
|
|
|
|
* before the inode item has been committed and inserted, then it is possible
|
xfs: unpin stale inodes directly in IOP_COMMITTED
When inodes are marked stale in a transaction, they are treated
specially when the inode log item is being inserted into the AIL.
It tries to avoid moving the log item forward in the AIL due to a
race condition with writing the underlying buffer back to disk.
This was "fixed" in commit de25c18 ("xfs: avoid moving stale inodes
in the AIL").
To avoid moving the item forward, we return a LSN smaller than the
commit_lsn of the completing transaction, thereby trying to trick
the commit code into not moving the inode forward at all. I'm not
sure this ever worked as intended - it assumes the inode is already
in the AIL, but I don't think the returned LSN would have been small
enough to prevent moving the inode. It appears that the reason it
worked is that the lower LSN of the inodes meant they were inserted
into the AIL and flushed before the inode buffer (which was moved to
the commit_lsn of the transaction).
The big problem is that with delayed logging, the returning of the
different LSN means insertion takes the slow, non-bulk path. Worse
yet is that insertion is to a position -before- the commit_lsn so it
is doing an AIL traversal on every insertion, and has to walk over
all the items that have already been inserted into the AIL. It's
expensive.
To compound the matter further, with delayed logging inodes are
likely to go from clean to stale in a single checkpoint, which means
they aren't even in the AIL at all when we come across them at AIL
insertion time. Hence these were all getting inserted into the AIL
when they simply do not need to be, as inodes marked XFS_ISTALE are
never written back.
Transactional/recovery integrity is maintained in this case by the
other items in the unlink transaction that were modified (e.g. the
AGI btree blocks) and committed in the same checkpoint.
So to fix this, simply unpin the stale inodes directly in
xfs_inode_item_committed() and return -1 to indicate that the AIL
insertion code does not need to do any further processing of these
inodes.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
2011-07-04 05:27:36 +00:00
|
|
|
* for the buffer to be written and IO to complete before the inode is inserted
|
2010-11-30 04:15:46 +00:00
|
|
|
* into the AIL. In that case, we'd be inserting a clean, stale inode into the
|
|
|
|
* AIL which will never get removed. It will, however, get reclaimed which
|
|
|
|
* triggers an assert in xfs_inode_free() complaining about freeing an inode
|
|
|
|
* still in the AIL.
|
|
|
|
*
|
2011-07-04 05:27:36 +00:00
|
|
|
* To avoid this, just unpin the inode directly and return a LSN of -1 so the
|
|
|
|
* transaction committed code knows that it does not need to do any further
|
|
|
|
* processing on the item.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
STATIC xfs_lsn_t
|
|
|
|
xfs_inode_item_committed(
|
2010-06-23 08:11:15 +00:00
|
|
|
struct xfs_log_item *lip,
|
2005-04-16 22:20:36 +00:00
|
|
|
xfs_lsn_t lsn)
|
|
|
|
{
|
2010-11-30 04:15:46 +00:00
|
|
|
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
|
|
|
|
struct xfs_inode *ip = iip->ili_inode;
|
|
|
|
|
2011-07-04 05:27:36 +00:00
|
|
|
if (xfs_iflags_test(ip, XFS_ISTALE)) {
|
|
|
|
xfs_inode_item_unpin(lip, 0);
|
|
|
|
return -1;
|
|
|
|
}
|
2010-06-23 08:11:15 +00:00
|
|
|
return lsn;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
|
|
|
xfs_inode_item_committing(
|
2010-06-23 08:11:15 +00:00
|
|
|
struct xfs_log_item *lip,
|
2019-06-29 02:27:32 +00:00
|
|
|
xfs_lsn_t commit_lsn)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-06-29 02:27:32 +00:00
|
|
|
INODE_ITEM(lip)->ili_last_lsn = commit_lsn;
|
|
|
|
return xfs_inode_item_release(lip);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-10-28 09:54:24 +00:00
|
|
|
static const struct xfs_item_ops xfs_inode_item_ops = {
|
2010-06-23 08:11:15 +00:00
|
|
|
.iop_size = xfs_inode_item_size,
|
|
|
|
.iop_format = xfs_inode_item_format,
|
|
|
|
.iop_pin = xfs_inode_item_pin,
|
|
|
|
.iop_unpin = xfs_inode_item_unpin,
|
2019-06-29 02:27:32 +00:00
|
|
|
.iop_release = xfs_inode_item_release,
|
2010-06-23 08:11:15 +00:00
|
|
|
.iop_committed = xfs_inode_item_committed,
|
|
|
|
.iop_push = xfs_inode_item_push,
|
2019-06-29 02:27:32 +00:00
|
|
|
.iop_committing = xfs_inode_item_committing,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the inode log item for a newly allocated (in-core) inode.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_inode_item_init(
|
2010-06-23 08:11:15 +00:00
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_mount *mp)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2010-06-23 08:11:15 +00:00
|
|
|
struct xfs_inode_log_item *iip;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
ASSERT(ip->i_itemp == NULL);
|
2020-07-22 16:23:10 +00:00
|
|
|
iip = ip->i_itemp = kmem_cache_zalloc(xfs_ili_zone,
|
|
|
|
GFP_KERNEL | __GFP_NOFAIL);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
iip->ili_inode = ip;
|
2020-06-29 21:48:46 +00:00
|
|
|
spin_lock_init(&iip->ili_lock);
|
2010-03-22 23:10:00 +00:00
|
|
|
xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
|
|
|
|
&xfs_inode_item_ops);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free the inode log item and any memory hanging off of it.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_inode_item_destroy(
|
xfs: pin inode backing buffer to the inode log item
When we dirty an inode, we are going to have to write it to disk at
some point in the near future. This requires the inode cluster
backing buffer to be present in memory. Unfortunately, under severe
memory pressure we can reclaim the inode backing buffer while the
inode is dirty in memory, resulting in stalling the AIL pushing
because it has to do a read-modify-write cycle on the cluster
buffer.
When we have no memory available, the read of the cluster buffer
blocks the AIL pushing process, and this causes all sorts of issues
for memory reclaim as it requires inode writeback to make forwards
progress. Allocating a cluster buffer causes more memory pressure,
and results in more cluster buffers being reclaimed, resulting in
more RMW cycles being done in the AIL context, and everything then
backs up on AIL progress. Only the synchronous inode cluster
writeback in the inode reclaim code provides some level of
forwards progress guarantees that prevent OOM-killer rampages in
this situation.
Fix this by pinning the inode backing buffer to the inode log item
when the inode is first dirtied (i.e. in xfs_trans_log_inode()).
This may mean the first modification of an inode that has been held
in cache for a long time may block on a cluster buffer read, but
we can do that in transaction context and block safely until the
buffer has been allocated and read.
Once we have the cluster buffer, the inode log item takes a
reference to it, pinning it in memory, and attaches it to the log
item for future reference. This means we can always grab the cluster
buffer from the inode log item when we need it.
When the inode is finally cleaned and removed from the AIL, we can
drop the reference the inode log item holds on the cluster buffer.
Once all inodes on the cluster buffer are clean, the cluster buffer
will be unpinned and available for memory reclaim again.
This avoids the issues with needing to do RMW cycles in the AIL
pushing context, and hence allows complete non-blocking inode
flushing to be performed by the AIL pushing context.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2020-06-29 21:49:15 +00:00
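The buffer pinning scheme described above boils down to reference counting:
take a hold on the cluster buffer the first time the inode is dirtied, cache
the pointer on the log item, and drop the hold once the item is clean. A
minimal userspace sketch of that idea follows; toy_buf, toy_item and the
toy_* helpers are illustrative stand-ins, with plain refcounting in place of
xfs_buf_hold()/xfs_buf_rele().

/*
 * Userspace sketch of the pinning scheme: the first time an item is
 * dirtied it takes a reference on its backing buffer and caches the
 * pointer; the reference is dropped again once the item is clean.
 * None of these names are the kernel API.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_buf {
	int refcount;
};

struct toy_item {
	int		dirty_fields;
	struct toy_buf	*buf;		/* held only while dirty */
};

static struct toy_buf *toy_buf_get(struct toy_buf *bp)
{
	bp->refcount++;
	return bp;
}

static void toy_buf_put(struct toy_buf *bp)
{
	if (--bp->refcount == 0) {
		printf("buffer unpinned, reclaimable again\n");
		free(bp);
	}
}

/* Called when the item is first logged in a transaction. */
static void toy_item_dirty(struct toy_item *ip, struct toy_buf *bp, int fields)
{
	if (!ip->dirty_fields)		/* first modification: pin the buffer */
		ip->buf = toy_buf_get(bp);
	ip->dirty_fields |= fields;
}

/* Called once writeback has cleaned the item. */
static void toy_item_clean(struct toy_item *ip)
{
	struct toy_buf *bp = ip->buf;

	ip->dirty_fields = 0;
	ip->buf = NULL;
	if (bp)
		toy_buf_put(bp);
}

int main(void)
{
	struct toy_buf *bp = calloc(1, sizeof(*bp));
	struct toy_item item = { 0 };

	if (!bp)
		return 1;
	bp->refcount = 1;		/* cache's own reference */
	toy_item_dirty(&item, bp, 0x1);
	toy_item_dirty(&item, bp, 0x4);	/* relogging: no extra reference */
	assert(bp->refcount == 2);

	toy_item_clean(&item);		/* drops the item's reference */
	toy_buf_put(bp);		/* drop the cache reference too */
	return 0;
}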
|
|
|
struct xfs_inode *ip)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2020-06-29 21:49:15 +00:00
|
|
|
struct xfs_inode_log_item *iip = ip->i_itemp;
|
|
|
|
|
|
|
|
ASSERT(iip->ili_item.li_buf == NULL);
|
|
|
|
|
|
|
|
ip->i_itemp = NULL;
|
|
|
|
kmem_free(iip->ili_item.li_lv_shadow);
|
|
|
|
kmem_cache_free(xfs_ili_zone, iip);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2020-06-29 21:49:20 +00:00
|
|
|
* We only want to pull the item from the AIL if it is actually there
|
|
|
|
* and its location in the log has not changed since we started the
|
|
|
|
* flush. Thus, we only bother if the inode's lsn has not changed.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2020-06-29 21:49:20 +00:00
|
|
|
static void
|
|
|
|
xfs_iflush_ail_updates(
|
|
|
|
struct xfs_ail *ailp,
|
|
|
|
struct list_head *list)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2020-06-29 21:49:20 +00:00
|
|
|
struct xfs_log_item *lip;
|
|
|
|
xfs_lsn_t tail_lsn = 0;
|
2010-12-20 01:03:17 +00:00
|
|
|
|
2020-06-29 21:49:20 +00:00
|
|
|
/* this is an opencoded batch version of xfs_trans_ail_delete */
|
|
|
|
spin_lock(&ailp->ail_lock);
|
|
|
|
list_for_each_entry(lip, list, li_bio_list) {
|
|
|
|
xfs_lsn_t lsn;
|
2010-12-20 01:03:17 +00:00
|
|
|
|
2020-06-29 21:49:20 +00:00
|
|
|
clear_bit(XFS_LI_FAILED, &lip->li_flags);
|
|
|
|
if (INODE_ITEM(lip)->ili_flush_lsn != lip->li_lsn)
|
2020-06-29 21:49:18 +00:00
|
|
|
continue;
|
|
|
|
|
2020-06-29 21:49:20 +00:00
|
|
|
lsn = xfs_ail_delete_one(ailp, lip);
|
|
|
|
if (!tail_lsn && lsn)
|
|
|
|
tail_lsn = lsn;
|
2010-12-20 01:03:17 +00:00
|
|
|
}
|
2020-06-29 21:49:20 +00:00
|
|
|
xfs_ail_update_finish(ailp, tail_lsn);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-06-29 21:49:20 +00:00
|
|
|
/*
|
|
|
|
* Walk the list of inodes that have completed their IOs. If they are clean
|
|
|
|
* remove them from the list and dissociate them from the buffer. Inodes that
|
|
|
|
* are still dirty remain linked to the buffer and on the list. Caller must
|
|
|
|
* handle them appropriately.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
xfs_iflush_finish(
|
|
|
|
struct xfs_buf *bp,
|
|
|
|
struct list_head *list)
|
|
|
|
{
|
|
|
|
struct xfs_log_item *lip, *n;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-06-29 21:49:20 +00:00
|
|
|
list_for_each_entry_safe(lip, n, list, li_bio_list) {
|
|
|
|
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
|
2020-06-29 21:49:15 +00:00
|
|
|
bool drop_buffer = false;
|
|
|
|
|
2020-06-29 21:48:46 +00:00
|
|
|
spin_lock(&iip->ili_lock);
|
2020-06-29 21:49:15 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove the reference to the cluster buffer if the inode is
|
2020-06-29 21:49:20 +00:00
|
|
|
* clean in memory and drop the buffer reference once we've
|
|
|
|
* dropped the locks we hold.
|
2020-06-29 21:49:15 +00:00
|
|
|
*/
|
|
|
|
ASSERT(iip->ili_item.li_buf == bp);
|
|
|
|
if (!iip->ili_fields) {
|
|
|
|
iip->ili_item.li_buf = NULL;
|
2020-06-29 21:49:20 +00:00
|
|
|
list_del_init(&lip->li_bio_list);
|
2020-06-29 21:49:15 +00:00
|
|
|
drop_buffer = true;
|
|
|
|
}
|
2010-12-20 01:03:17 +00:00
|
|
|
iip->ili_last_fields = 0;
|
2020-06-29 21:49:15 +00:00
|
|
|
iip->ili_flush_lsn = 0;
|
2020-06-29 21:48:46 +00:00
|
|
|
spin_unlock(&iip->ili_lock);
|
2020-08-17 23:41:01 +00:00
|
|
|
xfs_iflags_clear(iip->ili_inode, XFS_IFLUSHING);
|
2020-06-29 21:49:15 +00:00
|
|
|
if (drop_buffer)
|
|
|
|
xfs_buf_rele(bp);
|
2010-12-20 01:03:17 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2020-06-29 21:49:20 +00:00
|
|
|
/*
|
|
|
|
* Inode buffer IO completion routine. It is responsible for removing inodes
|
2020-08-17 23:41:01 +00:00
|
|
|
* attached to the buffer from the AIL if they have not been re-logged and
|
|
|
|
* completing the inode flush.
|
2020-06-29 21:49:20 +00:00
|
|
|
*/
|
|
|
|
void
|
2020-09-01 17:55:29 +00:00
|
|
|
xfs_buf_inode_iodone(
|
2020-06-29 21:49:20 +00:00
|
|
|
struct xfs_buf *bp)
|
|
|
|
{
|
|
|
|
struct xfs_log_item *lip, *n;
|
|
|
|
LIST_HEAD(flushed_inodes);
|
|
|
|
LIST_HEAD(ail_updates);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pull the attached inodes from the buffer one at a time and take the
|
|
|
|
* appropriate action on them.
|
|
|
|
*/
|
|
|
|
list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
|
|
|
|
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
|
|
|
|
|
|
|
|
if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) {
|
|
|
|
xfs_iflush_abort(iip->ili_inode);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (!iip->ili_last_fields)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Do an unlocked check for needing the AIL lock. */
|
|
|
|
if (iip->ili_flush_lsn == lip->li_lsn ||
|
|
|
|
test_bit(XFS_LI_FAILED, &lip->li_flags))
|
|
|
|
list_move_tail(&lip->li_bio_list, &ail_updates);
|
|
|
|
else
|
|
|
|
list_move_tail(&lip->li_bio_list, &flushed_inodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!list_empty(&ail_updates)) {
|
|
|
|
xfs_iflush_ail_updates(bp->b_mount->m_ail, &ail_updates);
|
|
|
|
list_splice_tail(&ail_updates, &flushed_inodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
xfs_iflush_finish(bp, &flushed_inodes);
|
|
|
|
if (!list_empty(&flushed_inodes))
|
|
|
|
list_splice_tail(&flushed_inodes, &bp->b_li_list);
|
|
|
|
}
|
|
|
|
|
2020-09-01 17:55:29 +00:00
|
|
|
void
|
|
|
|
xfs_buf_inode_io_fail(
|
|
|
|
struct xfs_buf *bp)
|
|
|
|
{
|
|
|
|
struct xfs_log_item *lip;
|
|
|
|
|
|
|
|
list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
|
|
|
|
set_bit(XFS_LI_FAILED, &lip->li_flags);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2020-08-17 23:41:01 +00:00
|
|
|
* This is the inode flushing abort routine. It is called when
|
2012-04-23 05:58:41 +00:00
|
|
|
* the filesystem is shutting down to clean up the inode state. It is
|
|
|
|
* responsible for removing the inode item from the AIL if it has not been
|
2020-08-17 23:41:01 +00:00
|
|
|
* re-logged and clearing the inode's flush state.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_iflush_abort(
|
2020-06-29 21:49:15 +00:00
|
|
|
struct xfs_inode *ip)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2020-06-29 21:49:15 +00:00
|
|
|
struct xfs_inode_log_item *iip = ip->i_itemp;
|
|
|
|
struct xfs_buf *bp = NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (iip) {
|
2020-06-29 21:49:15 +00:00
|
|
|
/*
|
|
|
|
* Clear the failed bit before removing the item from the AIL so
|
|
|
|
* xfs_trans_ail_delete() doesn't try to clear and release the
|
|
|
|
* buffer attached to the log item before we are done with it.
|
|
|
|
*/
|
|
|
|
clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
|
2020-05-06 20:27:04 +00:00
|
|
|
xfs_trans_ail_delete(&iip->ili_item, 0);
|
2020-06-29 21:49:15 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Clear the inode logging fields so no more flushes are
|
|
|
|
* attempted.
|
|
|
|
*/
|
2020-06-29 21:48:46 +00:00
|
|
|
spin_lock(&iip->ili_lock);
|
2020-06-29 21:48:45 +00:00
|
|
|
iip->ili_last_fields = 0;
|
2012-02-29 09:53:54 +00:00
|
|
|
iip->ili_fields = 0;
|
xfs: optimise away log forces on timestamp updates for fdatasync
xfs: timestamp updates cause excessive fdatasync log traffic
Sage Weil reported that a ceph test workload was writing to the
log on every fdatasync during an overwrite workload. Event tracing
showed that the only metadata modification being made was the
timestamp updates during the write(2) syscall, but fdatasync(2)
is supposed to ignore them. The key observation was that the
transactions in the log all looked like this:
INODE: #regs: 4 ino: 0x8b flags: 0x45 dsize: 32
And contained a flags field of 0x45 or 0x85, and had data and
attribute forks following the inode core. This means that the
timestamp updates were triggering dirty relogging of previously
logged parts of the inode that hadn't yet been flushed back to
disk.
There are two parts to this problem. The first is that XFS relogs
dirty regions in subsequent transactions, so it carries around the
fields that have been dirtied since the last time the inode was
written back to disk, not since the last time the inode was forced
into the log.
The second part is that on v5 filesystems, the inode change count
update during inode dirtying also sets the XFS_ILOG_CORE flag, so
on v5 filesystems this makes a timestamp update dirty the entire
inode.
As a result when fdatasync is run, it looks at the dirty fields in
the inode, and sees more than just the timestamp flag, even though
the only metadata change since the last fdatasync was just the
timestamps. Hence we force the log on every subsequent fdatasync
even though it is not needed.
To fix this, add a new field to the inode log item that tracks
changes since the last time fsync/fdatasync forced the log to flush
the changes to the journal. This flag is updated when we dirty the
inode, but we do it before updating the change count so it does not
carry the "core dirty" flag from timestamp updates. The fields are
zeroed when the inode is marked clean (due to writeback/freeing) or
when an fsync/fdatasync forces the log. Hence if we only dirty the
timestamps on the inode between fsync/fdatasync calls, the fdatasync
will not trigger another log force.
Over 100 runs of the test program:
Ext4 baseline:
runtime: 1.63s +/- 0.24s
avg lat: 1.59ms +/- 0.24ms
iops: ~2000
XFS, vanilla kernel:
runtime: 2.45s +/- 0.18s
avg lat: 2.39ms +/- 0.18ms
log forces: ~400/s
iops: ~1000
XFS, patched kernel:
runtime: 1.49s +/- 0.26s
avg lat: 1.46ms +/- 0.25ms
log forces: ~30/s
iops: ~1500
Reported-by: Sage Weil <sage@redhat.com>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2015-11-03 02:14:59 +00:00
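The fix described above amounts to keeping two dirty masks with different
lifetimes: one cleared on writeback (used for relogging) and one cleared
whenever the log is forced (used by fsync/fdatasync). A minimal userspace
model of that idea is sketched below; the DIRTY_* flags and toy_* names are
illustrative and only approximate the ili_fields/ili_fsync_fields pair.

/*
 * Userspace model of the two dirty masks: "fields" accumulates
 * everything dirtied since the last writeback (for relogging), while
 * "fsync_fields" only accumulates changes since the last log force.
 * fdatasync can then skip the log force when nothing but timestamps
 * changed.  Flag values and names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define DIRTY_TIMESTAMP	0x1
#define DIRTY_CORE	0x2
#define DIRTY_DATA	0x4

struct toy_ili {
	unsigned int fields;		/* dirty since last writeback */
	unsigned int fsync_fields;	/* dirty since last log force */
};

static void toy_dirty(struct toy_ili *ili, unsigned int flags)
{
	ili->fields |= flags;
	ili->fsync_fields |= flags;
}

/* datasync == true models fdatasync(2), which may ignore timestamps */
static bool toy_fsync_needs_log_force(struct toy_ili *ili, bool datasync)
{
	unsigned int mask = ili->fsync_fields;

	if (datasync)
		mask &= ~DIRTY_TIMESTAMP;
	if (!mask)
		return false;
	ili->fsync_fields = 0;		/* log forced: reset the fsync mask */
	return true;
}

int main(void)
{
	struct toy_ili ili = { 0 };

	toy_dirty(&ili, DIRTY_DATA | DIRTY_TIMESTAMP);
	printf("first fdatasync forces log: %d\n",
	       toy_fsync_needs_log_force(&ili, true));	/* 1 */

	toy_dirty(&ili, DIRTY_TIMESTAMP);		/* overwrite workload */
	printf("second fdatasync forces log: %d\n",
	       toy_fsync_needs_log_force(&ili, true));	/* 0 */

	printf("relog mask still carries: 0x%x\n", ili.fields);
	return 0;
}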
|
|
|
iip->ili_fsync_fields = 0;
|
2020-06-29 21:49:15 +00:00
|
|
|
iip->ili_flush_lsn = 0;
|
|
|
|
bp = iip->ili_item.li_buf;
|
|
|
|
iip->ili_item.li_buf = NULL;
|
2020-06-29 21:49:18 +00:00
|
|
|
list_del_init(&iip->ili_item.li_bio_list);
|
2020-06-29 21:48:46 +00:00
|
|
|
spin_unlock(&iip->ili_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2020-08-17 23:41:01 +00:00
|
|
|
xfs_iflags_clear(ip, XFS_IFLUSHING);
|
2020-06-29 21:49:15 +00:00
|
|
|
if (bp)
|
|
|
|
xfs_buf_rele(bp);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-06-09 04:55:38 +00:00
|
|
|
/*
|
2017-10-09 18:37:22 +00:00
|
|
|
* Convert an xfs_inode_log_format struct from the old 32-bit version
|
|
|
|
* (which can have different field alignments) to the native 64-bit version.
|
2006-06-09 04:55:38 +00:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_inode_item_format_convert(
|
2017-10-09 18:37:22 +00:00
|
|
|
struct xfs_log_iovec *buf,
|
|
|
|
struct xfs_inode_log_format *in_f)
|
2006-06-09 04:55:38 +00:00
|
|
|
{
|
2017-10-09 18:37:22 +00:00
|
|
|
struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
|
|
|
|
|
2019-11-02 16:40:53 +00:00
|
|
|
if (buf->i_len != sizeof(*in_f32)) {
|
|
|
|
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
|
2017-10-09 18:37:22 +00:00
|
|
|
return -EFSCORRUPTED;
|
2019-11-02 16:40:53 +00:00
|
|
|
}
|
2017-10-09 18:37:22 +00:00
|
|
|
|
|
|
|
in_f->ilf_type = in_f32->ilf_type;
|
|
|
|
in_f->ilf_size = in_f32->ilf_size;
|
|
|
|
in_f->ilf_fields = in_f32->ilf_fields;
|
|
|
|
in_f->ilf_asize = in_f32->ilf_asize;
|
|
|
|
in_f->ilf_dsize = in_f32->ilf_dsize;
|
|
|
|
in_f->ilf_ino = in_f32->ilf_ino;
|
2017-10-19 18:07:09 +00:00
|
|
|
memcpy(&in_f->ilf_u, &in_f32->ilf_u, sizeof(in_f->ilf_u));
|
2017-10-09 18:37:22 +00:00
|
|
|
in_f->ilf_blkno = in_f32->ilf_blkno;
|
|
|
|
in_f->ilf_len = in_f32->ilf_len;
|
|
|
|
in_f->ilf_boffset = in_f32->ilf_boffset;
|
|
|
|
return 0;
|
2006-06-09 04:55:38 +00:00
|
|
|
}
|