linux/fs/xfs/xfs_ioctl32.c

// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2004-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*/
#include <linux/compat.h>
#include <linux/ioctl.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fsmap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_fsops.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
#include "xfs_trace.h"
#include "xfs_sb.h"
#define _NATIVE_IOC(cmd, type) \
_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
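/*
 * On ia-32, 64-bit fields are only 4-byte aligned, so structures shared
 * across the ioctl ABI have a different layout than on a 64-bit kernel.
 * The helpers below copy such structures field by field between the
 * compat (32-bit) layout and the kernel's native layout.
 */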
#ifdef BROKEN_X86_ALIGNMENT
STATIC int
xfs_compat_flock64_copyin(
xfs_flock64_t *bf,
compat_xfs_flock64_t __user *arg32)
{
if (get_user(bf->l_type, &arg32->l_type) ||
get_user(bf->l_whence, &arg32->l_whence) ||
get_user(bf->l_start, &arg32->l_start) ||
get_user(bf->l_len, &arg32->l_len) ||
get_user(bf->l_sysid, &arg32->l_sysid) ||
get_user(bf->l_pid, &arg32->l_pid) ||
copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
return -EFAULT;
return 0;
}
STATIC int
xfs_compat_ioc_fsgeometry_v1(
struct xfs_mount *mp,
compat_xfs_fsop_geom_v1_t __user *arg32)
{
xfs_fsop_geom_t fsgeo;
int error;
error = xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
if (error)
return error;
/* The 32-bit variant simply has some padding at the end */
if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
return -EFAULT;
return 0;
}
STATIC int
xfs_compat_growfs_data_copyin(
struct xfs_growfs_data *in,
compat_xfs_growfs_data_t __user *arg32)
{
if (get_user(in->newblocks, &arg32->newblocks) ||
get_user(in->imaxpct, &arg32->imaxpct))
return -EFAULT;
return 0;
}
STATIC int
xfs_compat_growfs_rt_copyin(
struct xfs_growfs_rt *in,
compat_xfs_growfs_rt_t __user *arg32)
{
if (get_user(in->newblocks, &arg32->newblocks) ||
get_user(in->extsize, &arg32->extsize))
return -EFAULT;
return 0;
}
STATIC int
xfs_inumbers_fmt_compat(
void __user *ubuffer,
const struct xfs_inogrp *buffer,
long count,
long *written)
{
compat_xfs_inogrp_t __user *p32 = ubuffer;
long i;
for (i = 0; i < count; i++) {
if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) ||
put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask))
return -EFAULT;
}
*written = count * sizeof(*p32);
return 0;
}
#else
#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
#endif /* BROKEN_X86_ALIGNMENT */
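/*
 * Copy-in helpers for XFS_IOC_SWAPEXT_32: the compat xfs_bstime carries a
 * 32-bit tv_sec (compat_time_t), so it is widened field by field into the
 * native xfs_bstime_t/xfs_bstat_t before the common swapext code runs.
 */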
STATIC int
xfs_ioctl32_bstime_copyin(
xfs_bstime_t *bstime,
compat_xfs_bstime_t __user *bstime32)
{
compat_time_t sec32; /* tv_sec differs on 64 vs. 32 */
if (get_user(sec32, &bstime32->tv_sec) ||
get_user(bstime->tv_nsec, &bstime32->tv_nsec))
return -EFAULT;
bstime->tv_sec = sec32;
return 0;
}
/* xfs_bstat_t has differing alignment on Intel, and bstime_t sizes differ everywhere */
STATIC int
xfs_ioctl32_bstat_copyin(
xfs_bstat_t *bstat,
compat_xfs_bstat_t __user *bstat32)
{
if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
get_user(bstat->bs_mode, &bstat32->bs_mode) ||
get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
get_user(bstat->bs_uid, &bstat32->bs_uid) ||
get_user(bstat->bs_gid, &bstat32->bs_gid) ||
get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
get_user(bstat->bs_size, &bstat32->bs_size) ||
xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
get_user(bstat->bs_blocks, &bstat32->bs_blocks) ||
get_user(bstat->bs_xflags, &bstat32->bs_xflags) ||
get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
get_user(bstat->bs_extents, &bstat32->bs_extents) ||
get_user(bstat->bs_gen, &bstat32->bs_gen) ||
get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
get_user(bstat->bs_forkoff, &bstat32->bs_forkoff) ||
get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
get_user(bstat->bs_aextents, &bstat32->bs_aextents))
return -EFAULT;
return 0;
}
/* XFS_IOC_FSBULKSTAT and friends */
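/*
 * Copy-out formatters: these take the kernel's native bstat records and
 * store them into the caller's buffer using the 32-bit layout, one field
 * (and one put_user()) at a time.
 */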
STATIC int
xfs_bstime_store_compat(
compat_xfs_bstime_t __user *p32,
const xfs_bstime_t *p)
{
__s32 sec32;
sec32 = p->tv_sec;
if (put_user(sec32, &p32->tv_sec) ||
put_user(p->tv_nsec, &p32->tv_nsec))
return -EFAULT;
return 0;
}
/* Return 0 on success or a negative error (to xfs_bulkstat()) */
STATIC int
xfs_bulkstat_one_fmt_compat(
void __user *ubuffer,
int ubsize,
int *ubused,
const xfs_bstat_t *buffer)
{
compat_xfs_bstat_t __user *p32 = ubuffer;
if (ubsize < sizeof(*p32))
return -ENOMEM;
if (put_user(buffer->bs_ino, &p32->bs_ino) ||
put_user(buffer->bs_mode, &p32->bs_mode) ||
put_user(buffer->bs_nlink, &p32->bs_nlink) ||
put_user(buffer->bs_uid, &p32->bs_uid) ||
put_user(buffer->bs_gid, &p32->bs_gid) ||
put_user(buffer->bs_rdev, &p32->bs_rdev) ||
put_user(buffer->bs_blksize, &p32->bs_blksize) ||
put_user(buffer->bs_size, &p32->bs_size) ||
xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
put_user(buffer->bs_blocks, &p32->bs_blocks) ||
put_user(buffer->bs_xflags, &p32->bs_xflags) ||
put_user(buffer->bs_extsize, &p32->bs_extsize) ||
put_user(buffer->bs_extents, &p32->bs_extents) ||
put_user(buffer->bs_gen, &p32->bs_gen) ||
put_user(buffer->bs_projid, &p32->bs_projid) ||
put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) ||
put_user(buffer->bs_forkoff, &p32->bs_forkoff) ||
put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
put_user(buffer->bs_aextents, &p32->bs_aextents))
return -EFAULT;
if (ubused)
*ubused = sizeof(*p32);
return 0;
}
STATIC int
xfs_bulkstat_one_compat(
xfs_mount_t *mp, /* mount point for filesystem */
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* buffer to place output in */
int ubsize, /* size of buffer */
int *ubused, /* bytes used by me */
int *stat) /* BULKSTAT_RV_... */
{
return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
xfs_bulkstat_one_fmt_compat,
ubused, stat);
}
/* copied from xfs_ioctl.c */
STATIC int
xfs_compat_ioc_bulkstat(
xfs_mount_t *mp,
unsigned int cmd,
compat_xfs_fsop_bulkreq_t __user *p32)
{
u32 addr;
xfs_fsop_bulkreq_t bulkreq;
int count; /* # of records returned */
xfs_ino_t inlast; /* last inode number */
int done;
int error;
/*
* Output structure handling functions. Depending on the command,
* either the xfs_bstat or the xfs_inogrp structures are written out
* to userspace memory via bulkreq.ubuffer. Normally the compat
* functions and structure size are the correct ones to use ...
*/
inumbers_fmt_pf inumbers_func = xfs_inumbers_fmt_compat;
bulkstat_one_pf bs_one_func = xfs_bulkstat_one_compat;
size_t bs_one_size = sizeof(struct compat_xfs_bstat);
#ifdef CONFIG_X86_X32
if (in_x32_syscall()) {
/*
* ... but on x32 the input xfs_fsop_bulkreq has pointers
* which must be handled in the "compat" (32-bit) way, while
* the xfs_bstat and xfs_inogrp structures follow native 64-
* bit layout convention. So adjust accordingly, otherwise
* the data written out in compat layout will not match what
* x32 userspace expects.
*/
inumbers_func = xfs_inumbers_fmt;
bs_one_func = xfs_bulkstat_one;
bs_one_size = sizeof(struct xfs_bstat);
}
#endif
/* done = 1 if there are more stats to get and if bulkstat */
/* should be called again (unused here, but used in dmapi) */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
if (get_user(addr, &p32->lastip))
return -EFAULT;
bulkreq.lastip = compat_ptr(addr);
if (get_user(bulkreq.icount, &p32->icount) ||
get_user(addr, &p32->ubuffer))
return -EFAULT;
bulkreq.ubuffer = compat_ptr(addr);
if (get_user(addr, &p32->ocount))
return -EFAULT;
bulkreq.ocount = compat_ptr(addr);
if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
return -EFAULT;
if ((count = bulkreq.icount) <= 0)
return -EINVAL;
if (bulkreq.ubuffer == NULL)
return -EINVAL;
if (cmd == XFS_IOC_FSINUMBERS_32) {
error = xfs_inumbers(mp, &inlast, &count,
bulkreq.ubuffer, inumbers_func);
} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
int res;
error = bs_one_func(mp, inlast, bulkreq.ubuffer,
bs_one_size, NULL, &res);
} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
error = xfs_bulkstat(mp, &inlast, &count,
bs_one_func, bs_one_size,
bulkreq.ubuffer, &done);
} else
error = -EINVAL;
if (error)
return error;
if (bulkreq.ocount != NULL) {
if (copy_to_user(bulkreq.lastip, &inlast,
sizeof(xfs_ino_t)))
return -EFAULT;
if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
return -EFAULT;
}
return 0;
}
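/*
 * The compat handlereq stores user pointers as 32-bit values; widen them
 * with compat_ptr() while unpacking into the native xfs_fsop_handlereq so
 * the existing handle ioctl implementations can be reused unchanged.
 */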
STATIC int
xfs_compat_handlereq_copyin(
xfs_fsop_handlereq_t *hreq,
compat_xfs_fsop_handlereq_t __user *arg32)
{
compat_xfs_fsop_handlereq_t hreq32;
if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
return -EFAULT;
hreq->fd = hreq32.fd;
hreq->path = compat_ptr(hreq32.path);
hreq->oflags = hreq32.oflags;
hreq->ihandle = compat_ptr(hreq32.ihandle);
hreq->ihandlen = hreq32.ihandlen;
hreq->ohandle = compat_ptr(hreq32.ohandle);
hreq->ohandlen = compat_ptr(hreq32.ohandlen);
return 0;
}
STATIC struct dentry *
xfs_compat_handlereq_to_dentry(
struct file *parfilp,
compat_xfs_fsop_handlereq_t *hreq)
{
return xfs_handle_to_dentry(parfilp,
compat_ptr(hreq->ihandle), hreq->ihandlen);
}
STATIC int
xfs_compat_attrlist_by_handle(
struct file *parfilp,
void __user *arg)
{
int error;
attrlist_cursor_kern_t *cursor;
compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
compat_xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
char *kbuf;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&al_hreq, arg,
sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
return -EFAULT;
if (al_hreq.buflen < sizeof(struct attrlist) ||
al_hreq.buflen > XFS_XATTR_LIST_MAX)
return -EINVAL;
/*
* Reject flags, only allow namespaces.
*/
if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
return -EINVAL;
dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
error = -ENOMEM;
kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
if (!kbuf)
goto out_dput;
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
al_hreq.flags, cursor);
if (error)
goto out_kfree;
if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
error = -EFAULT;
goto out_kfree;
}
if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
error = -EFAULT;
out_kfree:
kmem_free(kbuf);
out_dput:
dput(dentry);
return error;
}
STATIC int
xfs_compat_attrmulti_by_handle(
struct file *parfilp,
void __user *arg)
{
int error;
compat_xfs_attr_multiop_t *ops;
compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
struct dentry *dentry;
unsigned int i, size;
unsigned char *attr_name;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&am_hreq, arg,
sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
return -EFAULT;
/* overflow check */
if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
return -E2BIG;
dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
error = -E2BIG;
size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
if (!size || size > 16 * PAGE_SIZE)
goto out_dput;
ops = memdup_user(compat_ptr(am_hreq.ops), size);
if (IS_ERR(ops)) {
error = PTR_ERR(ops);
goto out_dput;
}
error = -ENOMEM;
attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
if (!attr_name)
goto out_kfree_ops;
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
ops[i].am_error = strncpy_from_user((char *)attr_name,
compat_ptr(ops[i].am_attrname),
MAXNAMELEN);
if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
error = -ERANGE;
if (ops[i].am_error < 0)
break;
switch (ops[i].am_opcode) {
case ATTR_OP_GET:
ops[i].am_error = xfs_attrmulti_attr_get(
d_inode(dentry), attr_name,
compat_ptr(ops[i].am_attrvalue),
&ops[i].am_length, ops[i].am_flags);
break;
case ATTR_OP_SET:
ops[i].am_error = mnt_want_write_file(parfilp);
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_set(
d_inode(dentry), attr_name,
compat_ptr(ops[i].am_attrvalue),
ops[i].am_length, ops[i].am_flags);
mnt_drop_write_file(parfilp);
break;
case ATTR_OP_REMOVE:
ops[i].am_error = mnt_want_write_file(parfilp);
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_remove(
d_inode(dentry), attr_name,
ops[i].am_flags);
mnt_drop_write_file(parfilp);
break;
default:
ops[i].am_error = -EINVAL;
}
}
if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
error = -EFAULT;
kfree(attr_name);
out_kfree_ops:
kfree(ops);
out_dput:
dput(dentry);
return error;
}
STATIC int
xfs_compat_fssetdm_by_handle(
struct file *parfilp,
void __user *arg)
{
int error;
struct fsdmidata fsd;
compat_xfs_fsop_setdm_handlereq_t dmhreq;
struct dentry *dentry;
if (!capable(CAP_MKNOD))
return -EPERM;
if (copy_from_user(&dmhreq, arg,
sizeof(compat_xfs_fsop_setdm_handlereq_t)))
return -EFAULT;
dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
error = -EPERM;
goto out;
}
if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
error = -EFAULT;
goto out;
}
error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
fsd.fsd_dmstate);
out:
dput(dentry);
return error;
}
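/*
 * 32-bit ioctl entry point, wired up as ->compat_ioctl in the XFS file
 * operations.  Commands fall into three groups: those whose layout is
 * identical on all ABIs are passed straight to xfs_file_ioctl(); x32 (and
 * non-BROKEN_X86_ALIGNMENT) commands mostly match the native layout; the
 * remaining ia-32 commands are repacked field by field into native
 * structures before calling the common implementation.
 */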
long
xfs_file_compat_ioctl(
struct file *filp,
unsigned cmd,
unsigned long p)
{
struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
void __user *arg = (void __user *)p;
int error;
trace_xfs_file_compat_ioctl(ip);
switch (cmd) {
/* No size or alignment issues on any arch */
case XFS_IOC_DIOINFO:
case XFS_IOC_FSGEOMETRY:
case XFS_IOC_FSGETXATTR:
case XFS_IOC_FSSETXATTR:
case XFS_IOC_FSGETXATTRA:
case XFS_IOC_FSSETDM:
case XFS_IOC_GETBMAP:
case XFS_IOC_GETBMAPA:
case XFS_IOC_GETBMAPX:
case XFS_IOC_FSCOUNTS:
case XFS_IOC_SET_RESBLKS:
case XFS_IOC_GET_RESBLKS:
case XFS_IOC_FSGROWFSLOG:
case XFS_IOC_GOINGDOWN:
case XFS_IOC_ERROR_INJECTION:
case XFS_IOC_ERROR_CLEARALL:
case FS_IOC_GETFSMAP:
case XFS_IOC_SCRUB_METADATA:
return xfs_file_ioctl(filp, cmd, p);
#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
/*
* These are handled fine if no alignment issues. To support x32
* which uses native 64-bit alignment we must emit these cases in
* addition to the ia-32 compat set below.
*/
case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP:
case XFS_IOC_RESVSP:
case XFS_IOC_UNRESVSP:
case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP64:
case XFS_IOC_RESVSP64:
case XFS_IOC_UNRESVSP64:
case XFS_IOC_FSGEOMETRY_V1:
case XFS_IOC_FSGROWFSDATA:
case XFS_IOC_FSGROWFSRT:
case XFS_IOC_ZERO_RANGE:
#ifdef CONFIG_X86_X32
/*
* x32 special: this gets a different cmd number from the ia-32 compat
* case below; the associated data will match native 64-bit alignment.
*/
case XFS_IOC_SWAPEXT:
#endif
return xfs_file_ioctl(filp, cmd, p);
#endif
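/*
 * Everything from here to the matching #endif is ia-32 specific: the
 * 32-bit structure layouts differ from the native ones, so each argument
 * is repacked with the copy-in helpers above before the normal handler
 * runs.
 */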
#if defined(BROKEN_X86_ALIGNMENT)
case XFS_IOC_ALLOCSP_32:
case XFS_IOC_FREESP_32:
case XFS_IOC_ALLOCSP64_32:
case XFS_IOC_FREESP64_32:
case XFS_IOC_RESVSP_32:
case XFS_IOC_UNRESVSP_32:
case XFS_IOC_RESVSP64_32:
case XFS_IOC_UNRESVSP64_32:
case XFS_IOC_ZERO_RANGE_32: {
struct xfs_flock64 bf;
if (xfs_compat_flock64_copyin(&bf, arg))
return -EFAULT;
cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
return xfs_ioc_space(filp, cmd, &bf);
}
case XFS_IOC_FSGEOMETRY_V1_32:
return xfs_compat_ioc_fsgeometry_v1(mp, arg);
case XFS_IOC_FSGROWFSDATA_32: {
struct xfs_growfs_data in;
if (xfs_compat_growfs_data_copyin(&in, arg))
return -EFAULT;
error = mnt_want_write_file(filp);
if (error)
return error;
error = xfs_growfs_data(mp, &in);
mnt_drop_write_file(filp);
return error;
}
case XFS_IOC_FSGROWFSRT_32: {
struct xfs_growfs_rt in;
if (xfs_compat_growfs_rt_copyin(&in, arg))
return -EFAULT;
error = mnt_want_write_file(filp);
if (error)
return error;
error = xfs_growfs_rt(mp, &in);
mnt_drop_write_file(filp);
return error;
}
#endif
/* long changes size, but xfs only copies out 32 bits */
case XFS_IOC_GETXFLAGS_32:
case XFS_IOC_SETXFLAGS_32:
case XFS_IOC_GETVERSION_32:
cmd = _NATIVE_IOC(cmd, long);
return xfs_file_ioctl(filp, cmd, p);
case XFS_IOC_SWAPEXT_32: {
struct xfs_swapext sxp;
struct compat_xfs_swapext __user *sxu = arg;
/* Bulk copy in up to the sx_stat field, then copy bstat */
if (copy_from_user(&sxp, sxu,
offsetof(struct xfs_swapext, sx_stat)) ||
xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
return -EFAULT;
error = mnt_want_write_file(filp);
if (error)
return error;
error = xfs_ioc_swapext(&sxp);
mnt_drop_write_file(filp);
return error;
}
case XFS_IOC_FSBULKSTAT_32:
case XFS_IOC_FSBULKSTAT_SINGLE_32:
case XFS_IOC_FSINUMBERS_32:
return xfs_compat_ioc_bulkstat(mp, cmd, arg);
case XFS_IOC_FD_TO_HANDLE_32:
case XFS_IOC_PATH_TO_HANDLE_32:
case XFS_IOC_PATH_TO_FSHANDLE_32: {
struct xfs_fsop_handlereq hreq;
if (xfs_compat_handlereq_copyin(&hreq, arg))
return -EFAULT;
cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
return xfs_find_handle(cmd, &hreq);
}
case XFS_IOC_OPEN_BY_HANDLE_32: {
struct xfs_fsop_handlereq hreq;
if (xfs_compat_handlereq_copyin(&hreq, arg))
return -EFAULT;
return xfs_open_by_handle(filp, &hreq);
}
case XFS_IOC_READLINK_BY_HANDLE_32: {
struct xfs_fsop_handlereq hreq;
if (xfs_compat_handlereq_copyin(&hreq, arg))
return -EFAULT;
return xfs_readlink_by_handle(filp, &hreq);
}
case XFS_IOC_ATTRLIST_BY_HANDLE_32:
return xfs_compat_attrlist_by_handle(filp, arg);
case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
return xfs_compat_attrmulti_by_handle(filp, arg);
case XFS_IOC_FSSETDM_BY_HANDLE_32:
return xfs_compat_fssetdm_by_handle(filp, arg);
default:
return -ENOIOCTLCMD;
}
}