xfs: remove kmem_alloc_io()
Since commit 59bb47985c ("mm, sl[aou]b: guarantee natural alignment
for kmalloc(power-of-two)"), the core slab code guarantees alignment
in all situations sufficient for IO purposes (i.e. a minimum of 512
byte alignment for >= 512 byte sized heap allocations), so we no
longer need the workaround in the XFS code to provide this guarantee.

Replace the uses of kmem_alloc_io() with kmem_alloc() or
kmem_alloc_large() as appropriate, and remove the kmem_alloc_io()
interface altogether.
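
For context, a minimal sketch of the guarantee this commit relies on (a
hypothetical test module, not part of the change): with 59bb47985c
applied, kmalloc() of a power-of-two size returns memory naturally
aligned to that size, so a 512-byte buffer is already usable for
512-byte sector IO.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init kmalloc_align_demo_init(void)
{
	/* Power-of-two kmalloc() sizes are naturally aligned to the
	 * size, so no realignment-and-retry dance is needed for a
	 * 512-byte DMA-aligned buffer. */
	void *buf = kmalloc(512, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	WARN_ON((unsigned long)buf & 511);	/* holds with 59bb47985c */
	kfree(buf);
	return 0;
}

static void __exit kmalloc_align_demo_exit(void)
{
}

module_init(kmalloc_align_demo_init);
module_exit(kmalloc_align_demo_exit);
MODULE_LICENSE("GPL");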
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
commit 98fe2c3cef
parent de2860f463
fs/xfs/kmem.c
@@ -56,31 +56,6 @@ __kmem_vmalloc(size_t size, xfs_km_flags_t flags)
 	return ptr;
 }
 
-/*
- * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
- * to the @align_mask. We only guarantee alignment up to page size, we'll clamp
- * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
- * aligned region.
- */
-void *
-kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
-{
-	void	*ptr;
-
-	trace_kmem_alloc_io(size, flags, _RET_IP_);
-
-	if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
-		align_mask = PAGE_SIZE - 1;
-
-	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
-	if (ptr) {
-		if (!((uintptr_t)ptr & align_mask))
-			return ptr;
-		kfree(ptr);
-	}
-	return __kmem_vmalloc(size, flags);
-}
-
 void *
 kmem_alloc_large(size_t size, xfs_km_flags_t flags)
 {
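For reference, the deleted helper boils down to an opportunistic
pattern: keep the cheap allocation if it happens to satisfy the
alignment mask, otherwise fall back to an allocator that guarantees
page alignment. A minimal user-space sketch of the same idiom
(hypothetical names, with posix_memalign() standing in for the vmalloc
fallback):

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical illustration of the idiom removed above. align_mask is
 * alignment - 1, e.g. 511 for 512-byte DMA alignment. */
static void *alloc_io_aligned(size_t size, uintptr_t align_mask)
{
	void *ptr = malloc(size);

	if (ptr) {
		if (!((uintptr_t)ptr & align_mask))
			return ptr;	/* already aligned, keep it */
		free(ptr);		/* misaligned, give it back */
	}
	/* Fallback: posix_memalign() guarantees the alignment. */
	if (posix_memalign(&ptr, align_mask + 1, size) != 0)
		return NULL;
	return ptr;
}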
fs/xfs/kmem.h
@@ -57,7 +57,6 @@ kmem_flags_convert(xfs_km_flags_t flags)
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
 extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
 static inline void kmem_free(const void *ptr)
 {
fs/xfs/xfs_buf.c
@@ -315,7 +315,6 @@ xfs_buf_alloc_kmem(
 	struct xfs_buf	*bp,
 	xfs_buf_flags_t	flags)
 {
-	int		align_mask = xfs_buftarg_dma_alignment(bp->b_target);
 	xfs_km_flags_t	kmflag_mask = KM_NOFS;
 	size_t		size = BBTOB(bp->b_length);
 
@@ -323,7 +322,7 @@ xfs_buf_alloc_kmem(
 	if (!(flags & XBF_READ))
 		kmflag_mask |= KM_ZERO;
 
-	bp->b_addr = kmem_alloc_io(size, align_mask, kmflag_mask);
+	bp->b_addr = kmem_alloc(size, kmflag_mask);
 	if (!bp->b_addr)
 		return -ENOMEM;
 
fs/xfs/xfs_buf.h
@@ -355,12 +355,6 @@ extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);
 #define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
 #define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)
 
-static inline int
-xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
-{
-	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
-}
-
 int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
 bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
 bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);
fs/xfs/xfs_log.c
@@ -1476,7 +1476,6 @@ xlog_alloc_log(
 	 */
 	ASSERT(log->l_iclog_size >= 4096);
 	for (i = 0; i < log->l_iclog_bufs; i++) {
-		int align_mask = xfs_buftarg_dma_alignment(mp->m_logdev_targp);
 		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
 				sizeof(struct bio_vec);
 
@@ -1488,7 +1487,7 @@ xlog_alloc_log(
 		iclog->ic_prev = prev_iclog;
 		prev_iclog = iclog;
 
-		iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
+		iclog->ic_data = kmem_alloc_large(log->l_iclog_size,
 						KM_MAYFAIL | KM_ZERO);
 		if (!iclog->ic_data)
 			goto out_free_iclog;
fs/xfs/xfs_log_recover.c
@@ -79,8 +79,6 @@ xlog_alloc_buffer(
 	struct xlog	*log,
 	int		nbblks)
 {
-	int align_mask = xfs_buftarg_dma_alignment(log->l_targ);
-
 	/*
 	 * Pass log block 0 since we don't have an addr yet, buffer will be
 	 * verified on read.
@@ -108,7 +106,7 @@ xlog_alloc_buffer(
 	if (nbblks > 1 && log->l_sectBBsize > 1)
 		nbblks += log->l_sectBBsize;
 	nbblks = round_up(nbblks, log->l_sectBBsize);
-	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
+	return kmem_alloc_large(BBTOB(nbblks), KM_MAYFAIL | KM_ZERO);
 }
 
 /*
fs/xfs/xfs_trace.h
@@ -3774,7 +3774,6 @@ DEFINE_EVENT(xfs_kmem_class, name, \
 	TP_PROTO(ssize_t size, int flags, unsigned long caller_ip), \
 	TP_ARGS(size, flags, caller_ip))
 DEFINE_KMEM_EVENT(kmem_alloc);
-DEFINE_KMEM_EVENT(kmem_alloc_io);
 DEFINE_KMEM_EVENT(kmem_alloc_large);
 
 TRACE_EVENT(xfs_check_new_dalign,