mirror of
https://github.com/torvalds/linux.git
synced 2024-11-21 19:41:42 +00:00
xfs: move xfs_ioc_getfsmap out of xfs_ioctl.c
Move this function out of xfs_ioctl.c to reduce the clutter in there, and make the entire getfsmap implementation self-contained in a single file. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
516f91035c
commit
2ca7b9d7b8
@ -44,7 +44,7 @@ xfs_fsmap_from_internal(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Convert an fsmap to an xfs_fsmap. */
|
/* Convert an fsmap to an xfs_fsmap. */
|
||||||
void
|
static void
|
||||||
xfs_fsmap_to_internal(
|
xfs_fsmap_to_internal(
|
||||||
struct xfs_fsmap *dest,
|
struct xfs_fsmap *dest,
|
||||||
struct fsmap *src)
|
struct fsmap *src)
|
||||||
@ -889,7 +889,7 @@ xfs_getfsmap_check_keys(
|
|||||||
* xfs_getfsmap_info.low/high -- per-AG low/high keys computed from
|
* xfs_getfsmap_info.low/high -- per-AG low/high keys computed from
|
||||||
* dkeys; used to query the metadata.
|
* dkeys; used to query the metadata.
|
||||||
*/
|
*/
|
||||||
int
|
STATIC int
|
||||||
xfs_getfsmap(
|
xfs_getfsmap(
|
||||||
struct xfs_mount *mp,
|
struct xfs_mount *mp,
|
||||||
struct xfs_fsmap_head *head,
|
struct xfs_fsmap_head *head,
|
||||||
@ -1019,3 +1019,133 @@ xfs_getfsmap(
|
|||||||
head->fmh_oflags = FMH_OF_DEV_T;
|
head->fmh_oflags = FMH_OF_DEV_T;
|
||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Handle the GETFSMAP ioctl: walk the filesystem's reverse-mapping data
 * and copy physical extent records out to the userspace fsmap_head buffer
 * at @arg.
 *
 * Records are staged in an internal kernel buffer (up to 128k, falling
 * back to one page) so that no copy_to_user happens while xfs_getfsmap()
 * holds locks.  The query loop restarts from the last record returned
 * until the caller's buffer is full or the result set is exhausted; the
 * final record is then flagged FMR_OF_LAST.
 *
 * Returns 0 on success, or a negative errno (-EFAULT on bad user memory,
 * -EINVAL on nonzero reserved fields, -ENOMEM on allocation failure, or
 * whatever xfs_getfsmap() reports).
 */
int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head	__user *arg)
{
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	struct fsmap		*recs;
	unsigned int		count;
	__u32			last_flags = 0;
	bool			done = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	/* All reserved fields must be zero so they can gain meaning later. */
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	/*
	 * Use an internal memory buffer so that we don't have to copy fsmap
	 * data to userspace while holding locks. Start by trying to allocate
	 * up to 128k for the buffer, but fall back to a single page if needed.
	 */
	count = min_t(unsigned int, head.fmh_count,
			131072 / sizeof(struct fsmap));
	recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
	if (!recs) {
		count = min_t(unsigned int, head.fmh_count,
				PAGE_SIZE / sizeof(struct fsmap));
		recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
		if (!recs)
			return -ENOMEM;
	}

	/* Convert the userspace low/high keys into the internal form. */
	xhead.fmh_iflags = head.fmh_iflags;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	head.fmh_entries = 0;
	do {
		struct fsmap __user	*user_recs;
		struct fsmap		*last_rec;

		/* Next copy-out position in the caller's record array. */
		user_recs = &arg->fmh_recs[head.fmh_entries];
		xhead.fmh_entries = 0;
		xhead.fmh_count = min_t(unsigned int, count,
					head.fmh_count - head.fmh_entries);

		/* Run query, record how many entries we got. */
		error = xfs_getfsmap(ip->i_mount, &xhead, recs);
		switch (error) {
		case 0:
			/*
			 * There are no more records in the result set. Copy
			 * whatever we got to userspace and break out.
			 */
			done = true;
			break;
		case -ECANCELED:
			/*
			 * The internal memory buffer is full. Copy whatever
			 * records we got to userspace and go again if we have
			 * not yet filled the userspace buffer.
			 */
			error = 0;
			break;
		default:
			goto out_free;
		}
		head.fmh_entries += xhead.fmh_entries;
		head.fmh_oflags = xhead.fmh_oflags;

		/*
		 * If the caller wanted a record count or there aren't any
		 * new records to return, we're done.
		 */
		if (head.fmh_count == 0 || xhead.fmh_entries == 0)
			break;

		/* Copy all the records we got out to userspace. */
		if (copy_to_user(user_recs, recs,
				 xhead.fmh_entries * sizeof(struct fsmap))) {
			error = -EFAULT;
			goto out_free;
		}

		/* Remember the last record flags we copied to userspace. */
		last_rec = &recs[xhead.fmh_entries - 1];
		last_flags = last_rec->fmr_flags;

		/* Set up the low key for the next iteration. */
		xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
		trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	} while (!done && head.fmh_entries < head.fmh_count);

	/*
	 * If there are no more records in the query result set and we're not
	 * in counting mode, mark the last record returned with the LAST flag.
	 */
	if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
		struct fsmap __user	*user_rec;

		last_flags |= FMR_OF_LAST;
		user_rec = &arg->fmh_recs[head.fmh_entries - 1];

		/* Patch just the flags word of the final record in place. */
		if (copy_to_user(&user_rec->fmr_flags, &last_flags,
					sizeof(last_flags))) {
			error = -EFAULT;
			goto out_free;
		}
	}

	/* copy back header */
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
		error = -EFAULT;
		goto out_free;
	}

out_free:
	kvfree(recs);
	return error;
}
|
||||||
|
@ -7,6 +7,7 @@
|
|||||||
#define __XFS_FSMAP_H__
|
#define __XFS_FSMAP_H__
|
||||||
|
|
||||||
struct fsmap;
|
struct fsmap;
|
||||||
|
struct fsmap_head;
|
||||||
|
|
||||||
/* internal fsmap representation */
|
/* internal fsmap representation */
|
||||||
struct xfs_fsmap {
|
struct xfs_fsmap {
|
||||||
@ -27,9 +28,6 @@ struct xfs_fsmap_head {
|
|||||||
struct xfs_fsmap fmh_keys[2]; /* low and high keys */
|
struct xfs_fsmap fmh_keys[2]; /* low and high keys */
|
||||||
};
|
};
|
||||||
|
|
||||||
void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
|
int xfs_ioc_getfsmap(struct xfs_inode *ip, struct fsmap_head __user *arg);
|
||||||
|
|
||||||
int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
|
|
||||||
struct fsmap *out_recs);
|
|
||||||
|
|
||||||
#endif /* __XFS_FSMAP_H__ */
|
#endif /* __XFS_FSMAP_H__ */
|
||||||
|
@ -876,136 +876,6 @@ out_free_buf:
|
|||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * GETFSMAP ioctl handler: run the fsmap query against this filesystem and
 * copy the resulting physical extent records to the userspace buffer @arg.
 *
 * Records are staged in an internal kernel buffer (up to 128k, falling
 * back to one page) so that copy_to_user never runs while xfs_getfsmap()
 * holds locks.  The loop resumes from the last record returned until the
 * caller's buffer fills or the result set ends, at which point the final
 * record is marked FMR_OF_LAST.
 *
 * Returns 0 on success, or a negative errno (-EFAULT, -EINVAL on nonzero
 * reserved fields, -ENOMEM, or an error from xfs_getfsmap()).
 */
STATIC int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head	__user *arg)
{
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	struct fsmap		*recs;
	unsigned int		count;
	__u32			last_flags = 0;
	bool			done = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	/* Reserved fields must be zero so they can gain meaning later. */
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	/*
	 * Use an internal memory buffer so that we don't have to copy fsmap
	 * data to userspace while holding locks. Start by trying to allocate
	 * up to 128k for the buffer, but fall back to a single page if needed.
	 */
	count = min_t(unsigned int, head.fmh_count,
			131072 / sizeof(struct fsmap));
	recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
	if (!recs) {
		count = min_t(unsigned int, head.fmh_count,
				PAGE_SIZE / sizeof(struct fsmap));
		recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
		if (!recs)
			return -ENOMEM;
	}

	/* Translate the userspace low/high keys to the internal form. */
	xhead.fmh_iflags = head.fmh_iflags;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	head.fmh_entries = 0;
	do {
		struct fsmap __user	*user_recs;
		struct fsmap		*last_rec;

		/* Next copy-out position in the caller's record array. */
		user_recs = &arg->fmh_recs[head.fmh_entries];
		xhead.fmh_entries = 0;
		xhead.fmh_count = min_t(unsigned int, count,
					head.fmh_count - head.fmh_entries);

		/* Run query, record how many entries we got. */
		error = xfs_getfsmap(ip->i_mount, &xhead, recs);
		switch (error) {
		case 0:
			/*
			 * There are no more records in the result set. Copy
			 * whatever we got to userspace and break out.
			 */
			done = true;
			break;
		case -ECANCELED:
			/*
			 * The internal memory buffer is full. Copy whatever
			 * records we got to userspace and go again if we have
			 * not yet filled the userspace buffer.
			 */
			error = 0;
			break;
		default:
			goto out_free;
		}
		head.fmh_entries += xhead.fmh_entries;
		head.fmh_oflags = xhead.fmh_oflags;

		/*
		 * If the caller wanted a record count or there aren't any
		 * new records to return, we're done.
		 */
		if (head.fmh_count == 0 || xhead.fmh_entries == 0)
			break;

		/* Copy all the records we got out to userspace. */
		if (copy_to_user(user_recs, recs,
				 xhead.fmh_entries * sizeof(struct fsmap))) {
			error = -EFAULT;
			goto out_free;
		}

		/* Remember the last record flags we copied to userspace. */
		last_rec = &recs[xhead.fmh_entries - 1];
		last_flags = last_rec->fmr_flags;

		/* Set up the low key for the next iteration. */
		xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
		trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	} while (!done && head.fmh_entries < head.fmh_count);

	/*
	 * If there are no more records in the query result set and we're not
	 * in counting mode, mark the last record returned with the LAST flag.
	 */
	if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
		struct fsmap __user	*user_rec;

		last_flags |= FMR_OF_LAST;
		user_rec = &arg->fmh_recs[head.fmh_entries - 1];

		/* Patch just the flags word of the final record in place. */
		if (copy_to_user(&user_rec->fmr_flags, &last_flags,
					sizeof(last_flags))) {
			error = -EFAULT;
			goto out_free;
		}
	}

	/* copy back header */
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
		error = -EFAULT;
		goto out_free;
	}

out_free:
	kvfree(recs);
	return error;
}
|
|
||||||
|
|
||||||
int
|
int
|
||||||
xfs_ioc_swapext(
|
xfs_ioc_swapext(
|
||||||
xfs_swapext_t *sxp)
|
xfs_swapext_t *sxp)
|
||||||
|
Loading…
Reference in New Issue
Block a user