fs: Avoid grabbing sb->s_umount under bdev->bd_holder_lock
The implementation of bdev holder operations such as fs_bdev_mark_dead() and fs_bdev_sync() grabs the sb->s_umount semaphore under bdev->bd_holder_lock. This is problematic because it leads to a disk->open_mutex -> sb->s_umount lock ordering, which is counterintuitive (usually we grab higher-level (e.g. filesystem) locks first and lower-level (e.g. block layer) locks later) and indeed makes lockdep complain about possible locking cycles whenever we open a block device while holding the sb->s_umount semaphore.

Implement a function bdev_super_lock_shared() which safely transitions from holding bdev->bd_holder_lock to holding sb->s_umount on an alive superblock without introducing the problematic lock dependency. We use this function in fs_bdev_sync() and fs_bdev_mark_dead().

Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20231018152924.3858-1-jack@suse.cz
Link: https://lore.kernel.org/r/20231017184823.1383356-1-hch@lst.de
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit fd1464105c
parent 6306ff39a7
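Before the diff, a minimal userspace sketch of the hand-off the new helper performs may help. It uses plain pthreads rather than the kernel's primitives; every name in it (struct fs, struct holder, holder_fs_lock_shared(), fs_get()/fs_put()) is an invented stand-in for the superblock, the block device, bdev_super_lock_shared() and the s_count reference, and only the lock/reference ordering mirrors the patch: pin the object with a reference while the low-level lock is held, drop the low-level lock, then take the high-level lock and re-check that the object is still alive.

/*
 * Illustrative userspace sketch (not kernel code) of the lock hand-off
 * pattern: never hold the low-level lock while taking the high-level one.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fs {				/* stand-in for struct super_block */
	pthread_rwlock_t umount;	/* stand-in for sb->s_umount */
	int count;			/* stand-in for sb->s_count */
	bool active;			/* stand-in for SB_ACTIVE */
};

static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ sb_lock */

struct holder {				/* stand-in for struct block_device */
	pthread_mutex_t holder_lock;	/* ~ bdev->bd_holder_lock */
	struct fs *fs;			/* ~ bdev->bd_holder */
};

static void fs_get(struct fs *fs)
{
	pthread_mutex_lock(&count_lock);
	fs->count++;
	pthread_mutex_unlock(&count_lock);
}

static void fs_put(struct fs *fs)
{
	bool last;

	pthread_mutex_lock(&count_lock);
	last = (--fs->count == 0);
	pthread_mutex_unlock(&count_lock);
	if (last)
		free(fs);
}

/*
 * Called with holder->holder_lock held; releases it.  Returns the fs with
 * its "umount" lock held shared if it is still alive, NULL otherwise.
 * holder_lock is dropped *before* umount is taken, so the two locks are
 * never held at the same time; that is the point of the pattern.
 */
static struct fs *holder_fs_lock_shared(struct holder *holder)
{
	struct fs *fs = holder->fs;

	fs_get(fs);					/* pin fs so it cannot be freed */
	pthread_mutex_unlock(&holder->holder_lock);	/* drop the low-level lock */

	pthread_rwlock_rdlock(&fs->umount);		/* now take the high-level lock */
	if (!fs->active) {
		pthread_rwlock_unlock(&fs->umount);
		fs_put(fs);
		return NULL;
	}
	fs_put(fs);	/* alive and locked; drop the temporary reference */
	return fs;
}

int main(void)
{
	struct fs *fs = calloc(1, sizeof(*fs));
	struct holder holder = { .fs = fs };
	struct fs *locked;

	if (!fs)
		return 1;
	pthread_rwlock_init(&fs->umount, NULL);
	pthread_mutex_init(&holder.holder_lock, NULL);
	fs->count = 1;
	fs->active = true;

	pthread_mutex_lock(&holder.holder_lock);
	locked = holder_fs_lock_shared(&holder);	/* drops holder_lock */
	if (locked) {
		printf("fs is alive; sync it under the shared lock\n");
		pthread_rwlock_unlock(&locked->umount);
	}
	fs_put(fs);	/* drop the initial reference */
	return 0;
}

The sketch builds with cc -pthread. The same ordering is what lets the patch below avoid ever nesting sb->s_umount inside bdev->bd_holder_lock.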
block/bdev.c
@@ -1012,9 +1012,10 @@ void bdev_mark_dead(struct block_device *bdev, bool surprise)
 	mutex_lock(&bdev->bd_holder_lock);
 	if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
 		bdev->bd_holder_ops->mark_dead(bdev, surprise);
-	else
+	else {
+		mutex_unlock(&bdev->bd_holder_lock);
 		sync_blockdev(bdev);
-	mutex_unlock(&bdev->bd_holder_lock);
+	}
 
 	invalidate_bdev(bdev);
 }
block/ioctl.c
@@ -370,9 +370,10 @@ static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
 	mutex_lock(&bdev->bd_holder_lock);
 	if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
 		bdev->bd_holder_ops->sync(bdev);
-	else
+	else {
+		mutex_unlock(&bdev->bd_holder_lock);
 		sync_blockdev(bdev);
-	mutex_unlock(&bdev->bd_holder_lock);
+	}
 
 	invalidate_bdev(bdev);
 	return 0;
fs/super.c (50 lines changed)
@@ -1419,32 +1419,47 @@ EXPORT_SYMBOL(sget_dev);
 
 #ifdef CONFIG_BLOCK
 /*
- * Lock a super block that the callers holds a reference to.
+ * Lock the superblock that is holder of the bdev. Returns the superblock
+ * pointer if we successfully locked the superblock and it is alive. Otherwise
+ * we return NULL and just unlock bdev->bd_holder_lock.
  *
- * The caller needs to ensure that the super_block isn't being freed while
- * calling this function, e.g. by holding a lock over the call to this function
- * and the place that clears the pointer to the superblock used by this function
- * before freeing the superblock.
+ * The function must be called with bdev->bd_holder_lock and releases it.
  */
-static bool super_lock_shared_active(struct super_block *sb)
+static struct super_block *bdev_super_lock_shared(struct block_device *bdev)
+	__releases(&bdev->bd_holder_lock)
 {
-	bool born = super_lock_shared(sb);
+	struct super_block *sb = bdev->bd_holder;
+	bool born;
+
+	lockdep_assert_held(&bdev->bd_holder_lock);
+	lockdep_assert_not_held(&sb->s_umount);
+
+	/* Make sure sb doesn't go away from under us */
+	spin_lock(&sb_lock);
+	sb->s_count++;
+	spin_unlock(&sb_lock);
+	mutex_unlock(&bdev->bd_holder_lock);
 
+	born = super_lock_shared(sb);
 	if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
 		super_unlock_shared(sb);
-		return false;
+		put_super(sb);
+		return NULL;
 	}
-	return true;
+	/*
+	 * The superblock is active and we hold s_umount, we can drop our
+	 * temporary reference now.
+	 */
+	put_super(sb);
+	return sb;
 }
 
 static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
 {
-	struct super_block *sb = bdev->bd_holder;
+	struct super_block *sb;
 
-	/* bd_holder_lock ensures that the sb isn't freed */
-	lockdep_assert_held(&bdev->bd_holder_lock);
-
-	if (!super_lock_shared_active(sb))
+	sb = bdev_super_lock_shared(bdev);
+	if (!sb)
 		return;
 
 	if (!surprise)
@@ -1459,11 +1474,10 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
 
 static void fs_bdev_sync(struct block_device *bdev)
 {
-	struct super_block *sb = bdev->bd_holder;
+	struct super_block *sb;
 
-	lockdep_assert_held(&bdev->bd_holder_lock);
-
-	if (!super_lock_shared_active(sb))
+	sb = bdev_super_lock_shared(bdev);
+	if (!sb)
 		return;
 	sync_filesystem(sb);
 	super_unlock_shared(sb);