mirror of https://github.com/torvalds/linux.git, synced 2024-11-22 12:11:40 +00:00
9adf40249e
We have a mechanism that checks the amount of log space remaining available every time we make a transaction reservation. If the amount of space is below a threshold (25% free) we push on the AIL to tell it to do more work. To do this, we end up calculating the LSN that the AIL needs to push to on every reservation and updating the push target for the AIL with that new target LSN.

This is silly and expensive. The AIL is perfectly capable of calculating the push target itself, and it will always be running when the AIL contains objects.

What the target does is determine if the AIL needs to do any work before it goes back to sleep. If we haven't run out of reservation space or memory (or hit some other "push all" trigger), it will simply go back to sleep for a while if there is more than 25% of the journal space free, without doing anything. If there are items in the AIL at a lower LSN than the target, it will try to push up to the target, or to the point of getting stuck, before going back to sleep and trying again soon after.

Hence we can modify the AIL to calculate its own 25% push target before it starts a push, using the same reserve grant head based calculation as is currently used, and remove all the places where we ask the AIL to push to a new 25% free target. We can also drop the minimum free space size of 256BBs from the calculation because 25% of a minimum sized log is always going to be larger than 256BBs.

This does still require a manual push in certain circumstances. These circumstances arise when the AIL is not full, but the reservation grants consume the entirety of the free space in the log. In this case, we still need to push on the AIL to free up space, so when we hit this condition (i.e. a reservation going to sleep to wait on log space) we do a single push to tell the AIL it should empty itself. This will keep the AIL moving as new reservations come in and want more space, rather than queuing them up and having to push the AIL repeatedly.

The reason for using the "push all" when grant space runs out is that we can run out of grant space while there is more than 25% of the log free. Small logs are notorious for this, and we have a hack in the log callback code (xlog_state_set_callback()) where we push the AIL because the *head* moved, to ensure that we kick the AIL when we consume space in it, because that can push us over the "less than 25% available" threshold that starts tail pushing back up again.

Hence when we run out of grant space and are about to go to sleep, we have to consider that the grant space may be consuming almost all the log space while there is almost nothing in the AIL. In this situation, the AIL pins the tail, and moving the tail forwards is the only way the grant space will become available, so we have to force the AIL to push everything to guarantee that grant space will eventually be returned.

Hence triggering a "push all" just before sleeping removes all the nasty corner cases we have in other parts of the code that work around the "we didn't ask the AIL to push enough to free grant space" condition that leads to log space hangs.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
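To make the threshold arithmetic concrete, here is a minimal, self-contained C sketch of how a tail-pushing thread could derive its own 25% push requirement from the log size and the space consumed by the reserve grant head. This is an illustration only, not the kernel implementation: the struct sketch_log type, its fields, and calc_push_threshold() are hypothetical names, and the real code works in basic blocks and log cycles rather than a simple byte count.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified view of the journal for illustration only. */
struct sketch_log {
	uint64_t	size_bytes;		/* total journal size */
	uint64_t	reserve_grant_bytes;	/* space held by the reserve grant head */
};

/*
 * Return how much log space must be freed before more than 25% of the
 * journal is available again; zero means no push work is needed.
 */
static uint64_t
calc_push_threshold(const struct sketch_log *log)
{
	uint64_t free_bytes = log->size_bytes - log->reserve_grant_bytes;
	uint64_t want_free  = log->size_bytes / 4;	/* the 25% free threshold */

	if (free_bytes >= want_free)
		return 0;			/* plenty of space, go back to sleep */
	return want_free - free_bytes;		/* push at least this much out of the AIL */
}

int main(void)
{
	struct sketch_log log = {
		.size_bytes = 64 << 20,			/* 64MB journal */
		.reserve_grant_bytes = 60 << 20,	/* 60MB already reserved */
	};

	printf("need to free %llu bytes\n",
	       (unsigned long long)calc_push_threshold(&log));
	return 0;
}

The sketch only captures the "compute the 25% target locally" idea the commit describes; the "push all" fallback when reservations sleep on grant space is a separate trigger and is not modelled here.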
164 lines
4.6 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_cil_ctx;

struct xfs_log_vec {
	struct list_head	lv_list;	/* CIL lv chain ptrs */
	uint32_t		lv_order_id;	/* chain ordering info */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

/*
 * Calculate the log iovec length for a given user buffer length. Intended to be
 * used by ->iop_size implementations when sizing buffers of arbitrary
 * alignments.
 */
static inline int
xlog_calc_iovec_len(int len)
{
	return roundup(len, sizeof(uint32_t));
}
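/*
 * Illustrative sketch (not part of this header): an ->iop_size callback for
 * a hypothetical log item might use xlog_calc_iovec_len() to account for a
 * payload whose length is not naturally 32 bit aligned. The item and field
 * names below are made up for the example.
 */
static inline void
xfs_foo_item_size(struct xfs_log_item *lip, int *nvecs, int *nbytes)
{
	int	payload_len = 13;	/* stand-in for variable-length item data */

	*nvecs += 1;
	*nbytes += xlog_calc_iovec_len(payload_len);
}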
void *xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type);

static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec,
		int data_len)
{
	struct xlog_op_header	*oph = vec->i_addr;
	int			len;

	/*
	 * Always round up the length to the correct alignment so callers don't
	 * need to know anything about this log vec layout requirement. This
	 * means we have to zero the area the data to be written does not cover.
	 * This is complicated by the fact that the payload region is offset
	 * into the logvec region by the opheader that tracks the payload.
	 */
	len = xlog_calc_iovec_len(data_len);
	if (len - data_len != 0) {
		char	*buf = vec->i_addr + sizeof(struct xlog_op_header);

		memset(buf + data_len, 0, len - data_len);
	}

	/*
	 * The opheader tracks aligned payload length, whilst the logvec tracks
	 * the overall region length.
	 */
	oph->oh_len = cpu_to_be32(len);

	len += sizeof(struct xlog_op_header);
	lv->lv_buf_len += len;
	lv->lv_bytes += len;
	vec->i_len = len;

	/* Catch buffer overruns */
	ASSERT((void *)lv->lv_buf + lv->lv_bytes <= (void *)lv + lv->lv_size);
}

/*
 * Copy the amount of data requested by the caller into a new log iovec.
 */
static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

static inline void *
xlog_copy_from_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		const struct xfs_log_iovec *src)
{
	return xlog_copy_iovec(lv, vecp, src->i_type, src->i_addr, src->i_len);
}
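/*
 * Illustrative sketch (not part of this header): a log item's ->iop_format
 * callback typically builds its regions with the helpers above, either by
 * copying a prebuilt buffer with xlog_copy_iovec() or by formatting directly
 * into the space returned by xlog_prepare_iovec() and then sealing it with
 * xlog_finish_iovec(). The item name and payload below are hypothetical;
 * XLOG_REG_TYPE_ICORE is used purely as an example region type.
 */
static inline void
xfs_foo_item_format(struct xfs_log_item *lip, struct xfs_log_vec *lv)
{
	struct xfs_log_iovec	*vecp = NULL;
	char			payload[13] = "hello, log";	/* stand-in item data */

	/* copy the payload into its own log iovec; alignment padding is zeroed for us */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE /* stand-in type */,
			payload, sizeof(payload));
}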
/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)
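/*
 * Illustrative sketch (not part of this header): XFS_LSN_CMP() is how LSN
 * ordering is normally tested, for example to check whether an item's LSN
 * sits at or below a push target. The helper name below is hypothetical.
 */
static inline bool
xfs_foo_lsn_at_or_before(xfs_lsn_t lsn, xfs_lsn_t target)
{
	return XFS_LSN_CMP(lsn, target) <= 0;
}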
/*
 * Flags to xfs_log_force()
 *
 * XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;
struct xlog;

int	  xfs_log_force(struct xfs_mount *mp, uint flags);
int	  xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
		int *log_forced);
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int			num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
void	  xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	  xfs_log_space_wake(struct xfs_mount *mp);
int	  xfs_log_reserve(struct xfs_mount *mp, int length, int count,
		struct xlog_ticket **ticket, bool permanent);
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void	  xfs_log_unmount(struct xfs_mount *mp);
bool	  xfs_log_writable(struct xfs_mount *mp);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xlog_cil_process_committed(struct list_head *list);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
int	xfs_log_quiesce(struct xfs_mount *mp);
void	xfs_log_clean(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);

bool	xlog_force_shutdown(struct xlog *log, uint32_t shutdown_flags);

int	xfs_attr_use_log_assist(struct xfs_mount *mp);

#endif	/* __XFS_LOG_H__ */