Merge patch series "cachefiles: some bugfixes and cleanups for ondemand requests"

libaokun@huaweicloud.com <libaokun@huaweicloud.com> says:

We've been testing ondemand mode for cachefiles since January, and we're almost done. We hit a lot of issues during the testing period, and this patch set fixes some of the issues related to ondemand requests. The patches have passed internal testing without regression. The following is a brief overview of the patches; see the patches for more details.

Patches 1-5: Hold reference counts on reqs and objects across read requests so that a malicious restore cannot lead to a use-after-free.

Patches 6-10: Add consistency checks to copen/cread/get_fd so that malicious copen/cread/close-fd injections cannot cause a use-after-free or a hang.

Patch 11: When the cache is marked CACHEFILES_DEAD, flush all requests; otherwise the kernel may hang. Since this state is irreversible, the daemon can still read open requests but can no longer copen.

Patch 12: Allow a read request that is being processed to be interrupted by killing the reading process, avoiding hangs in some special cases.

 fs/cachefiles/daemon.c            |   3 +-
 fs/cachefiles/internal.h          |   5 +
 fs/cachefiles/ondemand.c          | 217 ++++++++++++++++++++++--------
 include/trace/events/cachefiles.h |   8 +-
 4 files changed, 176 insertions(+), 57 deletions(-)

* patches from https://lore.kernel.org/r/20240522114308.2402121-1-libaokun@huaweicloud.com:
  cachefiles: make on-demand read killable
  cachefiles: flush all requests after setting CACHEFILES_DEAD
  cachefiles: Set object to close if ondemand_id < 0 in copen
  cachefiles: defer exposing anon_fd until after copy_to_user() succeeds
  cachefiles: never get a new anonymous fd if ondemand_id is valid
  cachefiles: add spin_lock for cachefiles_ondemand_info
  cachefiles: add consistency check for copen/cread
  cachefiles: remove err_put_fd label in cachefiles_ondemand_daemon_read()
  cachefiles: fix slab-use-after-free in cachefiles_ondemand_daemon_read()
  cachefiles: fix slab-use-after-free in cachefiles_ondemand_get_fd()
  cachefiles: remove requests from xarray during flushing requests
  cachefiles: add output string to cachefiles_obj_[get|put]_ondemand_fd

Signed-off-by: Christian Brauner <brauner@kernel.org>
commit a82c13d299
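For context on the interface these fixes harden: in ondemand mode the daemon reads request messages from the cachefiles device and answers an OPEN request by writing a "copen" reply, after which reads are served through an anonymous fd. The following is a minimal, hypothetical user-space sketch based on the uapi definitions in include/uapi/linux/cachefiles.h; the handle_one_request() helper, the zero cache_size, and the error handling are illustrative only and not part of this series.

    /* Hypothetical daemon-side handler for an OPEN request (sketch only). */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/cachefiles.h>

    static int handle_one_request(int devfd)
    {
            char buf[CACHEFILES_MSG_MAX_SIZE];
            struct cachefiles_msg *msg = (struct cachefiles_msg *)buf;
            ssize_t n = read(devfd, buf, sizeof(buf)); /* blocks until a request is pending */

            if (n < (ssize_t)sizeof(*msg))
                    return -1;

            if (msg->opcode == CACHEFILES_OP_OPEN) {
                    struct cachefiles_open *load = (struct cachefiles_open *)msg->data;
                    long long cache_size = 0; /* a real daemon would look up the backing object */
                    char reply[64];

                    /* load->fd is the anonymous fd the kernel installed for this object */
                    snprintf(reply, sizeof(reply), "copen %u,%lld", msg->msg_id, cache_size);
                    if (write(devfd, reply, strlen(reply)) < 0)
                            return -1;
                    return load->fd;
            }
            return 0;
    }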
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -133,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
         return 0;
 }

-static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+void cachefiles_flush_reqs(struct cachefiles_cache *cache)
 {
         struct xarray *xa = &cache->reqs;
         struct cachefiles_req *req;
@@ -159,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
         xa_for_each(xa, index, req) {
                 req->error = -EIO;
                 complete(&req->done);
+                __xa_erase(xa, index);
         }
         xa_unlock(xa);

--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -55,6 +55,7 @@ struct cachefiles_ondemand_info {
         int ondemand_id;
         enum cachefiles_object_state state;
         struct cachefiles_object *object;
+        spinlock_t lock;
 };

 /*
@@ -138,6 +139,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
 struct cachefiles_req {
         struct cachefiles_object *object;
         struct completion done;
+        refcount_t ref;
         int error;
         struct cachefiles_msg msg;
 };
@@ -186,6 +188,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache,
  * daemon.c
  */
 extern const struct file_operations cachefiles_daemon_fops;
+extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
 extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
 extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);

@@ -424,6 +427,8 @@ do {                                                        \
         pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__);        \
         fscache_io_error((___cache)->cache);                 \
         set_bit(CACHEFILES_DEAD, &(___cache)->flags);        \
+        if (cachefiles_in_ondemand_mode(___cache))           \
+                cachefiles_flush_reqs(___cache);             \
 } while (0)

 #define cachefiles_io_error_obj(object, FMT, ...)            \
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -4,19 +4,40 @@
 #include <linux/uio.h>
 #include "internal.h"

+struct ondemand_anon_file {
+        struct file *file;
+        int fd;
+};
+
+static inline void cachefiles_req_put(struct cachefiles_req *req)
+{
+        if (refcount_dec_and_test(&req->ref))
+                kfree(req);
+}
+
 static int cachefiles_ondemand_fd_release(struct inode *inode,
                                           struct file *file)
 {
         struct cachefiles_object *object = file->private_data;
-        struct cachefiles_cache *cache = object->volume->cache;
-        struct cachefiles_ondemand_info *info = object->ondemand;
-        int object_id = info->ondemand_id;
+        struct cachefiles_cache *cache;
+        struct cachefiles_ondemand_info *info;
+        int object_id;
         struct cachefiles_req *req;
-        XA_STATE(xas, &cache->reqs, 0);
+        XA_STATE(xas, NULL, 0);
+
+        if (!object)
+                return 0;
+
+        info = object->ondemand;
+        cache = object->volume->cache;
+        xas.xa = &cache->reqs;

         xa_lock(&cache->reqs);
+        spin_lock(&info->lock);
+        object_id = info->ondemand_id;
         info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
         cachefiles_ondemand_set_object_close(object);
+        spin_unlock(&info->lock);

         /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
         xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
@@ -76,12 +97,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
 }

 static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
-                                         unsigned long arg)
+                                         unsigned long id)
 {
         struct cachefiles_object *object = filp->private_data;
         struct cachefiles_cache *cache = object->volume->cache;
         struct cachefiles_req *req;
-        unsigned long id;
+        XA_STATE(xas, &cache->reqs, id);

         if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
                 return -EINVAL;
@@ -89,10 +110,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
         if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
                 return -EOPNOTSUPP;

-        id = arg;
-        req = xa_erase(&cache->reqs, id);
-        if (!req)
+        xa_lock(&cache->reqs);
+        req = xas_load(&xas);
+        if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
+            req->object != object) {
+                xa_unlock(&cache->reqs);
                 return -EINVAL;
+        }
+        xas_store(&xas, NULL);
+        xa_unlock(&cache->reqs);

         trace_cachefiles_ondemand_cread(object, id);
         complete(&req->done);
@@ -116,10 +142,12 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
 {
         struct cachefiles_req *req;
         struct fscache_cookie *cookie;
+        struct cachefiles_ondemand_info *info;
         char *pid, *psize;
         unsigned long id;
         long size;
         int ret;
+        XA_STATE(xas, &cache->reqs, 0);

         if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
                 return -EOPNOTSUPP;
@@ -143,10 +171,18 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
         if (ret)
                 return ret;

-        req = xa_erase(&cache->reqs, id);
-        if (!req)
+        xa_lock(&cache->reqs);
+        xas.xa_index = id;
+        req = xas_load(&xas);
+        if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
+            !req->object->ondemand->ondemand_id) {
+                xa_unlock(&cache->reqs);
                 return -EINVAL;
+        }
+        xas_store(&xas, NULL);
+        xa_unlock(&cache->reqs);

+        info = req->object->ondemand;
         /* fail OPEN request if copen format is invalid */
         ret = kstrtol(psize, 0, &size);
         if (ret) {
@@ -166,6 +202,32 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
                 goto out;
         }

+        spin_lock(&info->lock);
+        /*
+         * The anonymous fd was closed before copen ? Fail the request.
+         *
+         *             t1             |             t2
+         * ---------------------------------------------------------
+         *                             cachefiles_ondemand_copen
+         *                             req = xa_erase(&cache->reqs, id)
+         * // Anon fd is maliciously closed.
+         * cachefiles_ondemand_fd_release
+         * xa_lock(&cache->reqs)
+         * cachefiles_ondemand_set_object_close(object)
+         * xa_unlock(&cache->reqs)
+         *                             cachefiles_ondemand_set_object_open
+         *                             // No one will ever close it again.
+         *                             cachefiles_ondemand_daemon_read
+         *                             cachefiles_ondemand_select_req
+         *
+         * Get a read req but its fd is already closed. The daemon can't
+         * issue a cread ioctl with an closed fd, then hung.
+         */
+        if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
+                spin_unlock(&info->lock);
+                req->error = -EBADFD;
+                goto out;
+        }
         cookie = req->object->cookie;
         cookie->object_size = size;
         if (size)
@@ -175,9 +237,15 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
         trace_cachefiles_ondemand_copen(req->object, id, size);

         cachefiles_ondemand_set_object_open(req->object);
+        spin_unlock(&info->lock);
         wake_up_all(&cache->daemon_pollwq);

 out:
+        spin_lock(&info->lock);
+        /* Need to set object close to avoid reopen status continuing */
+        if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
+                cachefiles_ondemand_set_object_close(req->object);
+        spin_unlock(&info->lock);
         complete(&req->done);
         return ret;
 }
@@ -205,14 +273,14 @@ int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
         return 0;
 }

-static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
+static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
+                                      struct ondemand_anon_file *anon_file)
 {
         struct cachefiles_object *object;
         struct cachefiles_cache *cache;
         struct cachefiles_open *load;
-        struct file *file;
         u32 object_id;
-        int ret, fd;
+        int ret;

         object = cachefiles_grab_object(req->object,
                                         cachefiles_obj_get_ondemand_fd);
@@ -224,35 +292,53 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
         if (ret < 0)
                 goto err;

-        fd = get_unused_fd_flags(O_WRONLY);
-        if (fd < 0) {
-                ret = fd;
+        anon_file->fd = get_unused_fd_flags(O_WRONLY);
+        if (anon_file->fd < 0) {
+                ret = anon_file->fd;
                 goto err_free_id;
         }

-        file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
-                                  object, O_WRONLY);
-        if (IS_ERR(file)) {
-                ret = PTR_ERR(file);
+        anon_file->file = anon_inode_getfile("[cachefiles]",
+                        &cachefiles_ondemand_fd_fops, object, O_WRONLY);
+        if (IS_ERR(anon_file->file)) {
+                ret = PTR_ERR(anon_file->file);
                 goto err_put_fd;
         }

-        file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
-        fd_install(fd, file);
+        spin_lock(&object->ondemand->lock);
+        if (object->ondemand->ondemand_id > 0) {
+                spin_unlock(&object->ondemand->lock);
+                /* Pair with check in cachefiles_ondemand_fd_release(). */
+                anon_file->file->private_data = NULL;
+                ret = -EEXIST;
+                goto err_put_file;
+        }
+
+        anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;

         load = (void *)req->msg.data;
-        load->fd = fd;
+        load->fd = anon_file->fd;
         object->ondemand->ondemand_id = object_id;
+        spin_unlock(&object->ondemand->lock);

         cachefiles_get_unbind_pincount(cache);
         trace_cachefiles_ondemand_open(object, &req->msg, load);
         return 0;

+err_put_file:
+        fput(anon_file->file);
+        anon_file->file = NULL;
 err_put_fd:
-        put_unused_fd(fd);
+        put_unused_fd(anon_file->fd);
+        anon_file->fd = ret;
 err_free_id:
         xa_erase(&cache->ondemand_ids, object_id);
 err:
+        spin_lock(&object->ondemand->lock);
+        /* Avoid marking an opened object as closed. */
+        if (object->ondemand->ondemand_id <= 0)
+                cachefiles_ondemand_set_object_close(object);
+        spin_unlock(&object->ondemand->lock);
         cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
         return ret;
 }
@@ -294,14 +380,28 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
         return NULL;
 }

+static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
+                                                  struct xa_state *xas, int err)
+{
+        if (unlikely(!xas || !req))
+                return false;
+
+        if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
+                return false;
+
+        req->error = err;
+        complete(&req->done);
+        return true;
+}
+
 ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
                                         char __user *_buffer, size_t buflen)
 {
         struct cachefiles_req *req;
         struct cachefiles_msg *msg;
-        unsigned long id = 0;
         size_t n;
         int ret = 0;
+        struct ondemand_anon_file anon_file;
         XA_STATE(xas, &cache->reqs, cache->req_id_next);

         xa_lock(&cache->reqs);
@@ -330,42 +430,37 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,

         xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
         cache->req_id_next = xas.xa_index + 1;
+        refcount_inc(&req->ref);
+        cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
         xa_unlock(&cache->reqs);

-        id = xas.xa_index;
-
         if (msg->opcode == CACHEFILES_OP_OPEN) {
-                ret = cachefiles_ondemand_get_fd(req);
-                if (ret) {
-                        cachefiles_ondemand_set_object_close(req->object);
-                        goto error;
-                }
+                ret = cachefiles_ondemand_get_fd(req, &anon_file);
+                if (ret)
+                        goto out;
         }

-        msg->msg_id = id;
+        msg->msg_id = xas.xa_index;
         msg->object_id = req->object->ondemand->ondemand_id;

-        if (copy_to_user(_buffer, msg, n) != 0) {
+        if (copy_to_user(_buffer, msg, n) != 0)
                 ret = -EFAULT;
-                goto err_put_fd;
-        }

-        /* CLOSE request has no reply */
-        if (msg->opcode == CACHEFILES_OP_CLOSE) {
-                xa_erase(&cache->reqs, id);
-                complete(&req->done);
-        }
-
-        return n;
-
-err_put_fd:
-        if (msg->opcode == CACHEFILES_OP_OPEN)
-                close_fd(((struct cachefiles_open *)msg->data)->fd);
-error:
-        xa_erase(&cache->reqs, id);
-        req->error = ret;
-        complete(&req->done);
-        return ret;
+        if (msg->opcode == CACHEFILES_OP_OPEN) {
+                if (ret < 0) {
+                        fput(anon_file.file);
+                        put_unused_fd(anon_file.fd);
+                        goto out;
+                }
+                fd_install(anon_file.fd, anon_file.file);
+        }
+out:
+        cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
+        /* Remove error request and CLOSE request has no reply */
+        if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
+                cachefiles_ondemand_finish_req(req, &xas, ret);
+        cachefiles_req_put(req);
+        return ret ? ret : n;
 }

 typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
@@ -395,6 +490,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
                 goto out;
         }

+        refcount_set(&req->ref, 1);
         req->object = object;
         init_completion(&req->done);
         req->msg.opcode = opcode;
@@ -454,9 +550,19 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
                 goto out;

         wake_up_all(&cache->daemon_pollwq);
-        wait_for_completion(&req->done);
-        ret = req->error;
-        kfree(req);
+wait:
+        ret = wait_for_completion_killable(&req->done);
+        if (!ret) {
+                ret = req->error;
+        } else {
+                ret = -EINTR;
+                if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
+                        /* Someone will complete it soon. */
+                        cpu_relax();
+                        goto wait;
+                }
+        }
+        cachefiles_req_put(req);
         return ret;
 out:
         /* Reset the object to close state in error handling path.
@@ -578,6 +684,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
                 return -ENOMEM;

         object->ondemand->object = object;
+        spin_lock_init(&object->ondemand->lock);
         INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
         return 0;
 }
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
         cachefiles_obj_see_withdrawal,
         cachefiles_obj_get_ondemand_fd,
         cachefiles_obj_put_ondemand_fd,
+        cachefiles_obj_get_read_req,
+        cachefiles_obj_put_read_req,
 };

 enum fscache_why_object_killed {
@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
         EM(cachefiles_obj_see_lookup_cookie,    "SEE lookup_cookie")    \
         EM(cachefiles_obj_see_lookup_failed,    "SEE lookup_failed")    \
         EM(cachefiles_obj_see_withdraw_cookie,  "SEE withdraw_cookie")  \
-        E_(cachefiles_obj_see_withdrawal,       "SEE withdrawal")
+        EM(cachefiles_obj_see_withdrawal,       "SEE withdrawal")       \
+        EM(cachefiles_obj_get_ondemand_fd,      "GET ondemand_fd")      \
+        EM(cachefiles_obj_put_ondemand_fd,      "PUT ondemand_fd")      \
+        EM(cachefiles_obj_get_read_req,         "GET read_req")         \
+        E_(cachefiles_obj_put_read_req,         "PUT read_req")

 #define cachefiles_coherency_traces \
         EM(cachefiles_coherency_check_aux,      "BAD aux ")             \
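The READ half of the protocol is what the killable wait and the stricter cread checks above are protecting: the daemon fills the requested range through the anonymous fd and acknowledges it with the CACHEFILES_IOC_READ_COMPLETE ioctl, which the kernel now only accepts if the id names a READ request belonging to that fd. A minimal, hypothetical sketch (handle_read() and the src buffer are illustrative, not part of this series):

    /* Hypothetical daemon-side handler for a READ request (sketch only). */
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/cachefiles.h>

    static int handle_read(int anon_fd, struct cachefiles_msg *msg,
                           const void *src /* data fetched from the real source */)
    {
            struct cachefiles_read *rd = (struct cachefiles_read *)msg->data;

            /* Fill the requested range of the cache file via the anonymous fd. */
            if (pwrite(anon_fd, src, rd->len, rd->off) != (ssize_t)rd->len)
                    return -1;

            /* "cread": complete the request; the kernel verifies that msg_id
             * refers to a READ request owned by this fd before completing it. */
            return ioctl(anon_fd, CACHEFILES_IOC_READ_COMPLETE, msg->msg_id);
    }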