fsnotify: Avoid data race between fsnotify_recalc_mask() and fsnotify_object_watched()

When __fsnotify_recalc_mask() recomputes the mask on the watched object, the compiler can "optimize" the code to perform partial updates to the mask (including zeroing it at the beginning). Thus places checking the object mask without conn->lock such as fsnotify_object_watched() could see invalid states of the mask. Make sure the mask update is performed by one memory store using WRITE_ONCE().

Reported-by: syzbot+701037856c25b143f1ad@syzkaller.appspotmail.com
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Link: https://lore.kernel.org/all/CACT4Y+Zk0ohwwwHSD63U2-PQ=UuamXczr1mKBD6xtj2dyYKBvA@mail.gmail.com
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Link: https://patch.msgid.link/20240717140623.27768-1-jack@suse.cz
commit 35ceae4474 (parent 9852d85ec9)
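For illustration only (not part of the patch): a minimal userspace C sketch of the pattern the fix relies on. A plain store to the shared mask may legally be compiled into several partial stores (or the field may be reused as scratch space), so a reader that does not hold conn->lock can observe a half-updated value; WRITE_ONCE() forces the update to be a single store and READ_ONCE() forces the check to be a single load. The names recalc_mask(), object_watched(), object_mask and conn_lock below are hypothetical stand-ins for __fsnotify_recalc_mask(), fsnotify_object_watched(), *fsnotify_conn_mask_p(conn) and conn->lock.

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile typeof(x) *)&(x))

static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int object_mask;        /* plays the role of *fsnotify_conn_mask_p(conn) */

/* Writer side: recompute the mask under the lock, publish it with one store. */
static void recalc_mask(unsigned int new_mask)
{
        pthread_mutex_lock(&conn_lock);
        /*
         * A plain "object_mask = new_mask;" here could be emitted as several
         * partial updates (e.g. zero it, then OR bits back in), so a lockless
         * reader could briefly see an empty or partial mask.
         */
        WRITE_ONCE(object_mask, new_mask);
        pthread_mutex_unlock(&conn_lock);
}

/* Reader side: no lock; a single load gives a consistent (old or new) snapshot. */
static int object_watched(unsigned int event_mask)
{
        return (event_mask & READ_ONCE(object_mask)) != 0;
}

int main(void)
{
        recalc_mask(0x1 | 0x2);
        printf("watched: %d\n", object_watched(0x2));
        return 0;
}

Note that READ_ONCE()/WRITE_ONCE() only rule out torn or invented accesses; they add no ordering, so a racing reader may still see the previous mask in full, which the lockless fast-path checks in the patch tolerate.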
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
@@ -183,8 +183,10 @@ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask,
 	BUILD_BUG_ON(FS_EVENTS_POSS_ON_CHILD & ~FS_EVENTS_POSS_TO_PARENT);
 
 	/* Did either inode/sb/mount subscribe for events with parent/name? */
-	marks_mask |= fsnotify_parent_needed_mask(inode->i_fsnotify_mask);
-	marks_mask |= fsnotify_parent_needed_mask(inode->i_sb->s_fsnotify_mask);
+	marks_mask |= fsnotify_parent_needed_mask(
+				READ_ONCE(inode->i_fsnotify_mask));
+	marks_mask |= fsnotify_parent_needed_mask(
+				READ_ONCE(inode->i_sb->s_fsnotify_mask));
 	marks_mask |= fsnotify_parent_needed_mask(mnt_mask);
 
 	/* Did they subscribe for this event with parent/name info? */
@@ -195,8 +197,8 @@ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask,
 static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask,
 					   __u32 mask)
 {
-	__u32 marks_mask = inode->i_fsnotify_mask | mnt_mask |
-			   inode->i_sb->s_fsnotify_mask;
+	__u32 marks_mask = READ_ONCE(inode->i_fsnotify_mask) | mnt_mask |
+			   READ_ONCE(inode->i_sb->s_fsnotify_mask);
 
 	return mask & marks_mask & ALL_FSNOTIFY_EVENTS;
 }
@@ -213,7 +215,8 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
 		      int data_type)
 {
 	const struct path *path = fsnotify_data_path(data, data_type);
-	__u32 mnt_mask = path ? real_mount(path->mnt)->mnt_fsnotify_mask : 0;
+	__u32 mnt_mask = path ?
+		READ_ONCE(real_mount(path->mnt)->mnt_fsnotify_mask) : 0;
 	struct inode *inode = d_inode(dentry);
 	struct dentry *parent;
 	bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED;
@@ -557,13 +560,13 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
 	    (!inode2 || !inode2->i_fsnotify_marks))
 		return 0;
 
-	marks_mask = sb->s_fsnotify_mask;
+	marks_mask = READ_ONCE(sb->s_fsnotify_mask);
 	if (mnt)
-		marks_mask |= mnt->mnt_fsnotify_mask;
+		marks_mask |= READ_ONCE(mnt->mnt_fsnotify_mask);
 	if (inode)
-		marks_mask |= inode->i_fsnotify_mask;
+		marks_mask |= READ_ONCE(inode->i_fsnotify_mask);
 	if (inode2)
-		marks_mask |= inode2->i_fsnotify_mask;
+		marks_mask |= READ_ONCE(inode2->i_fsnotify_mask);
 
 	/*
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
@@ -569,7 +569,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
 	/* more bits in old than in new? */
 	int dropped = (old_mask & ~new_mask);
 	/* more bits in this fsn_mark than the inode's mask? */
-	int do_inode = (new_mask & ~inode->i_fsnotify_mask);
+	int do_inode = (new_mask & ~READ_ONCE(inode->i_fsnotify_mask));
 
 	/* update the inode with this new fsn_mark */
 	if (dropped || do_inode)
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
@@ -128,7 +128,7 @@ __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn)
 	if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
 		return 0;
 
-	return *fsnotify_conn_mask_p(conn);
+	return READ_ONCE(*fsnotify_conn_mask_p(conn));
 }
 
 static void fsnotify_get_sb_watched_objects(struct super_block *sb)
@@ -245,7 +245,11 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
 		    !(mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF))
 			want_iref = true;
 	}
-	*fsnotify_conn_mask_p(conn) = new_mask;
+	/*
+	 * We use WRITE_ONCE() to prevent silly compiler optimizations from
+	 * confusing readers not holding conn->lock with partial updates.
+	 */
+	WRITE_ONCE(*fsnotify_conn_mask_p(conn), new_mask);
 
 	return fsnotify_update_iref(conn, want_iref);
 }