mirror of
https://github.com/torvalds/linux.git
synced 2024-12-18 00:53:40 +00:00
6dcde60efd
Dave Airlie reported the following lockdep complaint:

> ======================================================
> WARNING: possible circular locking dependency detected
> 5.7.0-0.rc5.20200515git1ae7efb38854.1.fc33.x86_64 #1 Not tainted
> ------------------------------------------------------
> kswapd0/159 is trying to acquire lock:
> ffff9b38d01a4470 (&xfs_nondir_ilock_class){++++}-{3:3},
> at: xfs_ilock+0xde/0x2c0 [xfs]
>
> but task is already holding lock:
> ffffffffbbb8bd00 (fs_reclaim){+.+.}-{0:0}, at:
> __fs_reclaim_acquire+0x5/0x30
>
> which lock already depends on the new lock.
>
> the existing dependency chain (in reverse order) is:
>
> -> #1 (fs_reclaim){+.+.}-{0:0}:
>        fs_reclaim_acquire+0x34/0x40
>        __kmalloc+0x4f/0x270
>        kmem_alloc+0x93/0x1d0 [xfs]
>        kmem_alloc_large+0x4c/0x130 [xfs]
>        xfs_attr_copy_value+0x74/0xa0 [xfs]
>        xfs_attr_get+0x9d/0xc0 [xfs]
>        xfs_get_acl+0xb6/0x200 [xfs]
>        get_acl+0x81/0x160
>        posix_acl_xattr_get+0x3f/0xd0
>        vfs_getxattr+0x148/0x170
>        getxattr+0xa7/0x240
>        path_getxattr+0x52/0x80
>        do_syscall_64+0x5c/0xa0
>        entry_SYSCALL_64_after_hwframe+0x49/0xb3
>
> -> #0 (&xfs_nondir_ilock_class){++++}-{3:3}:
>        __lock_acquire+0x1257/0x20d0
>        lock_acquire+0xb0/0x310
>        down_write_nested+0x49/0x120
>        xfs_ilock+0xde/0x2c0 [xfs]
>        xfs_reclaim_inode+0x3f/0x400 [xfs]
>        xfs_reclaim_inodes_ag+0x20b/0x410 [xfs]
>        xfs_reclaim_inodes_nr+0x31/0x40 [xfs]
>        super_cache_scan+0x190/0x1e0
>        do_shrink_slab+0x184/0x420
>        shrink_slab+0x182/0x290
>        shrink_node+0x174/0x680
>        balance_pgdat+0x2d0/0x5f0
>        kswapd+0x21f/0x510
>        kthread+0x131/0x150
>        ret_from_fork+0x3a/0x50
>
> other info that might help us debug this:
>
>  Possible unsafe locking scenario:
>
>        CPU0                    CPU1
>        ----                    ----
>   lock(fs_reclaim);
>                                lock(&xfs_nondir_ilock_class);
>                                lock(fs_reclaim);
>   lock(&xfs_nondir_ilock_class);
>
>  *** DEADLOCK ***
>
> 4 locks held by kswapd0/159:
>  #0: ffffffffbbb8bd00 (fs_reclaim){+.+.}-{0:0}, at:
> __fs_reclaim_acquire+0x5/0x30
>  #1: ffffffffbbb7cef8 (shrinker_rwsem){++++}-{3:3}, at:
> shrink_slab+0x115/0x290
>  #2: ffff9b39f07a50e8 (&type->s_umount_key#56){++++}-{3:3}, at:
> super_cache_scan+0x38/0x1e0
>  #3: ffff9b39f077f258 (&pag->pag_ici_reclaim_lock){+.+.}-{3:3}, at:
> xfs_reclaim_inodes_ag+0x82/0x410 [xfs]

This is a known false positive because inodes cannot simultaneously be
getting reclaimed and the target of a getxattr operation, but lockdep
doesn't know that. We can (selectively) shut up lockdep until either it
gets smarter or we change inode reclaim not to require the ILOCK by
applying a stupid GFP_NOLOCKDEP bandaid.

Reported-by: Dave Airlie <airlied@gmail.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Tested-by: Dave Airlie <airlied@gmail.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
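The bandaid works by tagging the allocation in the getxattr path with the
new KM_NOLOCKDEP flag, which kmem_flags_convert() in the header below turns
into __GFP_NOLOCKDEP so lockdep stops tracking that allocation. A minimal
sketch of the kind of caller-side change the message describes; the call
site and the args/valuelen names are assumed from the lockdep trace above
(xfs_attr_copy_value), not quoted from the actual diff:

	/*
	 * Hedged sketch, not the literal commit diff: allocating the
	 * xattr value buffer with KM_NOLOCKDEP means lockdep no longer
	 * records a fs_reclaim -> ILOCK dependency for this call.
	 */
	args->value = kmem_alloc_large(valuelen, KM_NOLOCKDEP);
	if (!args->value)
		return -ENOMEM;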
105 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * Default page/slab allocator behavior is to retry for ever
	 * for small allocations. We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long
	 * as it is feasible but rather fail than retry forever for all
	 * request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	if (flags & KM_NOLOCKDEP)
		lflags |= __GFP_NOLOCKDEP;

	return lflags;
}
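/*
 * Worked example (added for illustration; not part of the original
 * header): kmem_flags_convert(KM_NOFS | KM_ZERO) starts from
 * GFP_KERNEL | __GFP_NOWARN, clears __GFP_FS and sets __GFP_ZERO,
 * i.e. it returns GFP_NOFS | __GFP_NOWARN | __GFP_ZERO, since
 * GFP_NOFS is exactly GFP_KERNEL with __GFP_FS removed.
 */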
extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void  kmem_free(const void *ptr)
{
	kvfree(ptr);
}


static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}

/*
 * Zone interfaces
 */

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}

static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

#endif /* __XFS_SUPPORT_KMEM_H__ */
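For orientation, a small hypothetical usage sketch of the wrappers above;
the struct name and both helper functions are invented for illustration
and do not appear in XFS, and the sketch assumes fs/xfs/kmem.h is in scope:

	/* Hypothetical example type; not an XFS structure. */
	struct example_rec {
		int	count;
	};

	static struct example_rec *
	example_rec_alloc(void)
	{
		/*
		 * Zeroed allocation that must not recurse into the
		 * filesystem (KM_NOFS) and may fail under memory
		 * pressure instead of retrying forever (KM_MAYFAIL).
		 */
		return kmem_zalloc(sizeof(struct example_rec),
				KM_NOFS | KM_MAYFAIL);
	}

	static void
	example_rec_free(struct example_rec *rec)
	{
		/* kvfree() behind kmem_free() copes with kmalloc or vmalloc memory. */
		kmem_free(rec);
	}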