// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2008, Christoph Hellwig
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_acl.h"

#include <linux/posix_acl_xattr.h>

/*
 * Locking scheme:
 *  - all ACL updates are protected by inode->i_mutex, which is taken before
 *    calling into this file.
 */
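
/*
 * Convert the on-disk ACL in @aclp (@len bytes, big-endian) into a newly
 * allocated in-core posix_acl. The buffer is validated first: it must be at
 * least header-sized, hold no more than @max_entries entries, and its entry
 * count must agree with @len. Returns the new ACL or an ERR_PTR.
 */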
STATIC struct posix_acl *
xfs_acl_from_disk(
	struct xfs_mount	*mp,
	const struct xfs_acl	*aclp,
	int			len,
	int			max_entries)
{
	struct posix_acl_entry *acl_e;
	struct posix_acl *acl;
	const struct xfs_acl_entry *ace;
	unsigned int count, i;

	if (len < sizeof(*aclp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	count = be32_to_cpu(aclp->acl_cnt);
	if (count > max_entries || XFS_ACL_SIZE(count) != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	acl = posix_acl_alloc(count, GFP_KERNEL);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		acl_e = &acl->a_entries[i];
		ace = &aclp->acl_entry[i];

		/*
		 * The tag is 32 bits on disk and 16 bits in core.
		 *
		 * Because every access to it goes through the core
		 * format first this is not a problem.
		 */
		acl_e->e_tag = be32_to_cpu(ace->ae_tag);
		acl_e->e_perm = be16_to_cpu(ace->ae_perm);

		switch (acl_e->e_tag) {
		case ACL_USER:
			acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id));
			break;
		case ACL_GROUP:
			acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id));
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			break;
		default:
			goto fail;
		}
	}
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}
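
/*
 * Convert an in-core posix_acl into the big-endian on-disk format at @aclp.
 * The caller is responsible for sizing @aclp to hold acl->a_count entries.
 */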
STATIC void
xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
{
	const struct posix_acl_entry *acl_e;
	struct xfs_acl_entry *ace;
	int i;

	aclp->acl_cnt = cpu_to_be32(acl->a_count);
	for (i = 0; i < acl->a_count; i++) {
		ace = &aclp->acl_entry[i];
		acl_e = &acl->a_entries[i];

		ace->ae_tag = cpu_to_be32(acl_e->e_tag);
		switch (acl_e->e_tag) {
		case ACL_USER:
			ace->ae_id = cpu_to_be32(xfs_kuid_to_uid(acl_e->e_uid));
			break;
		case ACL_GROUP:
			ace->ae_id = cpu_to_be32(xfs_kgid_to_gid(acl_e->e_gid));
			break;
		default:
			ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
			break;
		}

		ace->ae_perm = cpu_to_be16(acl_e->e_perm);
	}
}
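
/*
 * Read and decode the on-disk ACL attribute (SGI_ACL_FILE or
 * SGI_ACL_DEFAULT) for the given @type. Returns NULL if the inode has no
 * such ACL, or an ERR_PTR for any other error.
 */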
struct posix_acl *
xfs_get_acl(struct inode *inode, int type)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct posix_acl *acl = NULL;
	struct xfs_acl *xfs_acl = NULL;
	unsigned char *ea_name;
	int error;
	int len;

	trace_xfs_get_acl(ip);

	switch (type) {
	case ACL_TYPE_ACCESS:
		ea_name = SGI_ACL_FILE;
		break;
	case ACL_TYPE_DEFAULT:
		ea_name = SGI_ACL_DEFAULT;
		break;
	default:
		BUG();
	}

	/*
	 * If we have a cached ACL value just return it; no need to go out
	 * to the disk.
	 */
	len = XFS_ACL_MAX_SIZE(ip->i_mount);
	/*
	 * Passing a NULL buffer with ATTR_ALLOC lets the attr code allocate
	 * the xattr buffer on demand once the actual xattr length is known.
	 * The no-xattr case then avoids the allocation entirely, and the
	 * common ACL case gets a small heap allocation instead of a
	 * worst-case XFS_ACL_MAX_SIZE() buffer; only remote xattrs still
	 * need a multi-page allocation.
	 */
	error = xfs_attr_get(ip, ea_name, strlen(ea_name),
				(unsigned char **)&xfs_acl, &len,
				ATTR_ALLOC | ATTR_ROOT);
	if (error) {
		/*
		 * If the attribute doesn't exist make sure we have a negative
		 * cache entry, for any other error assume it is transient.
		 */
		if (error != -ENOATTR)
			acl = ERR_PTR(error);
	} else {
		acl = xfs_acl_from_disk(ip->i_mount, xfs_acl, len,
					XFS_ACL_MAX_ENTRIES(ip->i_mount));
		kmem_free(xfs_acl);
	}
	return acl;
}
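
/*
 * Write the on-disk form of @acl to the SGI_ACL_* attribute for @type, or
 * remove the attribute when @acl is NULL, and update the VFS ACL cache on
 * success. The inode mode is left untouched; xfs_set_acl() handles that.
 */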
int
__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	struct xfs_inode *ip = XFS_I(inode);
	unsigned char *ea_name;
	int error;

	switch (type) {
	case ACL_TYPE_ACCESS:
		ea_name = SGI_ACL_FILE;
		break;
	case ACL_TYPE_DEFAULT:
		if (!S_ISDIR(inode->i_mode))
			return acl ? -EACCES : 0;
		ea_name = SGI_ACL_DEFAULT;
		break;
	default:
		return -EINVAL;
	}

	if (acl) {
		struct xfs_acl *xfs_acl;
		int len = XFS_ACL_MAX_SIZE(ip->i_mount);

		xfs_acl = kmem_zalloc_large(len, 0);
		if (!xfs_acl)
			return -ENOMEM;

		xfs_acl_to_disk(xfs_acl, acl);

		/* subtract away the unused acl entries */
		len -= sizeof(struct xfs_acl_entry) *
			(XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);

		error = xfs_attr_set(ip, ea_name, strlen(ea_name),
				     (unsigned char *)xfs_acl, len, ATTR_ROOT);

		kmem_free(xfs_acl);
	} else {
		/*
		 * A NULL ACL argument means we want to remove the ACL.
		 */
		error = xfs_attr_remove(ip, ea_name, strlen(ea_name),
					ATTR_ROOT);

		/*
		 * If the attribute didn't exist to start with that's fine.
		 */
		if (error == -ENOATTR)
			error = 0;
	}

	if (!error)
		set_cached_acl(inode, type, acl);
	return error;
}
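
/*
 * Apply a new mode to the inode, bumping ctime as well; a no-op if the
 * mode is unchanged.
 */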
static int
xfs_set_mode(struct inode *inode, umode_t mode)
{
	int error = 0;

	if (mode != inode->i_mode) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
		iattr.ia_mode = mode;
		iattr.ia_ctime = current_time(inode);

		error = xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
	}

	return error;
}
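
/*
 * VFS ->set_acl entry point. Setting an ACL_TYPE_ACCESS ACL may also
 * require a mode change; the new mode is computed up front but only
 * applied once the xattr update has succeeded (see below).
 */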
int
xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	umode_t mode;
	bool set_mode = false;
	int error = 0;

	if (!acl)
		goto set_acl;

	error = -E2BIG;
	if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
		return error;

	if (type == ACL_TYPE_ACCESS) {
		error = posix_acl_update_mode(inode, &mode, &acl);
		if (error)
			return error;
		set_mode = true;
	}

set_acl:
	error = __xfs_set_acl(inode, acl, type);
	if (error)
		return error;

	/*
	 * We set the mode after successfully updating the ACL xattr because the
	 * xattr update can fail at ENOSPC and we don't want to change the mode
	 * if the ACL update hasn't been applied.
	 */
	if (set_mode)
		error = xfs_set_mode(inode, mode);

	return error;
}