separate copying and locking mount tree on cross-userns copies

Rather than having propagate_mnt() check whether it is doing unprivileged
copies, lock the copied trees just before commit_tree().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   2019-01-30 13:15:45 -05:00
commit 3bd045cc9c
parent 6d7fbce7da
3 changed files with 38 additions and 29 deletions
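For orientation, here is a minimal, self-contained userspace sketch of the ordering this commit switches to: copy the mount tree first, then add the lock bits to the whole copy in a separate pass. This is not kernel code; the struct, the flag values and the lock_tree() helper are made-up stand-ins for struct mount, the MNT_*/MNT_LOCK_* pairs and lock_mnt_tree().

	#include <stdio.h>

	/* hypothetical stand-in for struct mount; the real code walks the tree via next_mnt() */
	struct mnt {
		int flags;
		struct mnt *next;
	};

	/* made-up flag values mirroring the MNT_* / MNT_LOCK_* pairing */
	#define MNT_READONLY      0x01
	#define MNT_LOCK_READONLY 0x02
	#define MNT_LOCKED        0x04

	/* rough analogue of lock_mnt_tree(): lock every mount in an already-copied tree */
	static void lock_tree(struct mnt *head)
	{
		struct mnt *p;

		for (p = head; p; p = p->next) {
			if (p->flags & MNT_READONLY)
				p->flags |= MNT_LOCK_READONLY;
			p->flags |= MNT_LOCKED;
		}
	}

	int main(void)
	{
		struct mnt child = { MNT_READONLY, NULL };
		struct mnt root  = { 0, &child };

		/* step 1 (copying the tree) is assumed done; step 2 locks the copy as a whole */
		lock_tree(&root);

		printf("root=%#x child=%#x\n", root.flags, child.flags);
		return 0;
	}

The point of the split is visible in the diff below: copy_tree() no longer needs a CL_UNPRIVILEGED flag, because the caller decides after the copy whether the tree crossed a user namespace and, if so, locks it in one pass.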

fs/namespace.c

@@ -1013,27 +1013,6 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
 	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
-	/* Don't allow unprivileged users to change mount flags */
-	if (flag & CL_UNPRIVILEGED) {
-		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
-		if (mnt->mnt.mnt_flags & MNT_READONLY)
-			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
-		if (mnt->mnt.mnt_flags & MNT_NODEV)
-			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
-		if (mnt->mnt.mnt_flags & MNT_NOSUID)
-			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
-		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
-			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
-	}
-	/* Don't allow unprivileged users to reveal what is under a mount */
-	if ((flag & CL_UNPRIVILEGED) &&
-	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
-		mnt->mnt.mnt_flags |= MNT_LOCKED;
 	atomic_inc(&sb->s_active);
 	mnt->mnt.mnt_sb = sb;
@@ -1837,6 +1816,33 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
 	return 0;
 }
+static void lock_mnt_tree(struct mount *mnt)
+{
+	struct mount *p;
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		int flags = p->mnt.mnt_flags;
+		/* Don't allow unprivileged users to change mount flags */
+		flags |= MNT_LOCK_ATIME;
+		if (flags & MNT_READONLY)
+			flags |= MNT_LOCK_READONLY;
+		if (flags & MNT_NODEV)
+			flags |= MNT_LOCK_NODEV;
+		if (flags & MNT_NOSUID)
+			flags |= MNT_LOCK_NOSUID;
+		if (flags & MNT_NOEXEC)
+			flags |= MNT_LOCK_NOEXEC;
+		/* Don't allow unprivileged users to reveal what is under a mount */
+		if (list_empty(&p->mnt_expire))
+			flags |= MNT_LOCKED;
+		p->mnt.mnt_flags = flags;
+	}
+}
 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
 {
 	struct mount *p;
@@ -1954,6 +1960,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 			struct mountpoint *dest_mp,
 			struct path *parent_path)
 {
+	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
 	HLIST_HEAD(tree_list);
 	struct mnt_namespace *ns = dest_mnt->mnt_ns;
 	struct mountpoint *smp;
@@ -2004,6 +2011,9 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 					    child->mnt_mountpoint);
 		if (q)
 			mnt_change_mountpoint(child, smp, q);
+		/* Notice when we are propagating across user namespaces */
+		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
+			lock_mnt_tree(child);
 		commit_tree(child);
 	}
 	put_mountpoint(smp);
@@ -2941,13 +2951,18 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 	/* First pass: copy the tree topology */
 	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
 	if (user_ns != ns->user_ns)
-		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
+		copy_flags |= CL_SHARED_TO_SLAVE;
 	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
 	if (IS_ERR(new)) {
 		namespace_unlock();
 		free_mnt_ns(new_ns);
 		return ERR_CAST(new);
 	}
+	if (user_ns != ns->user_ns) {
+		lock_mount_hash();
+		lock_mnt_tree(new);
+		unlock_mount_hash();
+	}
 	new_ns->root = new;
 	list_add_tail(&new_ns->list, &new->mnt_list);

fs/pnode.c

@@ -214,7 +214,6 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
 }
 /* all accesses are serialized by namespace_sem */
-static struct user_namespace *user_ns;
 static struct mount *last_dest, *first_source, *last_source, *dest_master;
 static struct mountpoint *mp;
 static struct hlist_head *list;
@@ -260,9 +259,6 @@ static int propagate_one(struct mount *m)
 			type |= CL_MAKE_SHARED;
 	}
-	/* Notice when we are propagating across user namespaces */
-	if (m->mnt_ns->user_ns != user_ns)
-		type |= CL_UNPRIVILEGED;
 	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
 	if (IS_ERR(child))
 		return PTR_ERR(child);
@@ -303,7 +299,6 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
 	 * propagate_one(); everything is serialized by namespace_sem,
 	 * so globals will do just fine.
 	 */
-	user_ns = current->nsproxy->mnt_ns->user_ns;
 	last_dest = dest_mnt;
 	first_source = source_mnt;
 	last_source = source_mnt;

fs/pnode.h

@@ -27,8 +27,7 @@
 #define CL_MAKE_SHARED		0x08
 #define CL_PRIVATE		0x10
 #define CL_SHARED_TO_SLAVE	0x20
-#define CL_UNPRIVILEGED		0x40
-#define CL_COPY_MNT_NS_FILE	0x80
+#define CL_COPY_MNT_NS_FILE	0x40
 #define CL_COPY_ALL		(CL_COPY_UNBINDABLE | CL_COPY_MNT_NS_FILE)