io_uring-5.6-2020-03-13

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl5rxtkQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpv/xEACifgfgyE3a2ZM7w2VTe41IpMxOouEnUWOJ
 oVKRp+9gkynE8pUGlE1igTa7T2nQIZM+Qd0KWqknkP2iFiQaNXSqqr8U6qIz9lzV
 I6SAcj0Pa2FRzlRly5UXLKiadIHbt2OfP6PIk6sXTcMCFUXb75/WzNFVOnNnBuee
 j8F5JUw45xyLXvQnfxpYSt8LeZyGYLoOwJEZX3j+hFHl1GCqSrAY8EB5tkXFbCZi
 L9JdJYOBEvnwFF4qxWl++2bmEOywnKeFea84JqbGr9BaVrDAOjAWMairZAU82xiI
 EWdQRKkSyDzrl+TACz/ri4J87fzE8FhBpHLufSY3HCxizaayNawxItDg5CCW1ghn
 i+bEaKq6djZn1CpSU0w0CTfA1g0D1DnErBS82znC8ciV1ZflAed8oADh3/+X64j8
 HzPT1DRoDGnzp4pBwTiZcG7Jb605Mh8i1TY1p35riaUbIR4y84BVNroEUHtO5Cmh
 U09efdYifsU9XM+u0OXK+SvrHqtDb6EVSx5x37qiV1SVxZ3JSsr9/uTjnBOrjH5W
 nUjqCzQfJZYSNmvRT6aSGDzk5wON95nnv7hYE9HWER/Cw7/VwKdJmBwehIAZUaXG
 NxJ7I/mVndGKV8ghoN119XVl7t2i56Ctj2pwu/UJH7lZB/Yfu9qZ5oKpku/Kbriy
 pYqSdy8J/Q==
 =0jJw
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-5.6-2020-03-13' of git://git.kernel.dk/linux-block

Pull io_uring fix from Jens Axboe:
 "Just a single fix here, improving the RCU callback ordering from last
  week. After a bit more perusing by Paul, he poked a hole in the
  original"

* tag 'io_uring-5.6-2020-03-13' of git://git.kernel.dk/linux-block:
  io_uring: ensure RCU callback ordering with rcu_barrier()

Linus Torvalds 2020-03-13 13:00:08 -07:00
commit 5007928eae

fs/io_uring.c

@@ -191,7 +191,6 @@ struct fixed_file_data {
 	struct llist_head		put_llist;
 	struct work_struct		ref_work;
 	struct completion		done;
-	struct rcu_head			rcu;
 };
 
 struct io_ring_ctx {
@@ -5331,26 +5330,23 @@ static void io_file_ref_kill(struct percpu_ref *ref)
 	complete(&data->done);
 }
 
-static void __io_file_ref_exit_and_free(struct rcu_head *rcu)
-{
-	struct fixed_file_data *data = container_of(rcu, struct fixed_file_data,
-							rcu);
-	percpu_ref_exit(&data->refs);
-	kfree(data);
-}
-
-static void io_file_ref_exit_and_free(struct rcu_head *rcu)
-{
-	/*
-	 * We need to order our exit+free call against the potentially
-	 * existing call_rcu() for switching to atomic. One way to do that
-	 * is to have this rcu callback queue the final put and free, as we
-	 * could otherwise have a pre-existing atomic switch complete _after_
-	 * the free callback we queued.
-	 */
-	call_rcu(rcu, __io_file_ref_exit_and_free);
-}
+static void io_file_ref_exit_and_free(struct work_struct *work)
+{
+	struct fixed_file_data *data;
+
+	data = container_of(work, struct fixed_file_data, ref_work);
+
+	/*
+	 * Ensure any percpu-ref atomic switch callback has run, it could have
+	 * been in progress when the files were being unregistered. Once
+	 * that's done, we can safely exit and free the ref and containing
+	 * data structure.
+	 */
+	rcu_barrier();
+	percpu_ref_exit(&data->refs);
+	kfree(data);
+}
 
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
 	struct fixed_file_data *data = ctx->file_data;
@@ -5369,7 +5365,8 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 	for (i = 0; i < nr_tables; i++)
 		kfree(data->table[i].files);
 	kfree(data->table);
-	call_rcu(&data->rcu, io_file_ref_exit_and_free);
+	INIT_WORK(&data->ref_work, io_file_ref_exit_and_free);
+	queue_work(system_wq, &data->ref_work);
 	ctx->file_data = NULL;
 	ctx->nr_user_files = 0;
 	return 0;
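
For readers following the change, below is a minimal, self-contained sketch of the teardown pattern the patch switches to, using hypothetical demo_* names rather than the io_uring structures. percpu_ref_kill() switches the ref to atomic mode through an internal call_rcu(), so the release path can still be running inside an RCU callback when the waiter wakes; deferring the final teardown to a workqueue lets rcu_barrier() sleep until any such callback has finished before percpu_ref_exit() and kfree(). This is a sketch of the ordering idea only, not the actual io_uring code.

/*
 * Sketch (hypothetical demo_* names): defer exit+free to a work item and
 * use rcu_barrier() there so no percpu-ref RCU callback can still be
 * running when the containing structure is freed.
 */
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_data {
	struct percpu_ref	refs;
	struct work_struct	ref_work;
	struct completion	done;
};

/* Release callback: invoked once the last reference is dropped. */
static void demo_ref_release(struct percpu_ref *ref)
{
	struct demo_data *d = container_of(ref, struct demo_data, refs);

	complete(&d->done);
}

/* Runs in process context via the workqueue, so rcu_barrier() may sleep. */
static void demo_exit_and_free(struct work_struct *work)
{
	struct demo_data *d = container_of(work, struct demo_data, ref_work);

	/* Wait for the percpu-ref atomic-switch RCU callback to finish. */
	rcu_barrier();
	percpu_ref_exit(&d->refs);
	kfree(d);
}

static struct demo_data *demo_register(void)
{
	struct demo_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	init_completion(&d->done);
	if (percpu_ref_init(&d->refs, demo_ref_release, 0, GFP_KERNEL)) {
		kfree(d);
		return NULL;
	}
	return d;
}

static void demo_unregister(struct demo_data *d)
{
	/*
	 * percpu_ref_kill() queues an RCU callback to switch the ref to
	 * atomic mode and drops the initial reference; once in-flight
	 * percpu_ref_get()/percpu_ref_put() users finish, demo_ref_release()
	 * fires, possibly from inside that RCU callback.
	 */
	percpu_ref_kill(&d->refs);
	wait_for_completion(&d->done);

	/* Defer the barrier and free instead of freeing here directly. */
	INIT_WORK(&d->ref_work, demo_exit_and_free);
	queue_work(system_wq, &d->ref_work);
}

Queueing the final step on system_wq also keeps the potentially long rcu_barrier() wait out of the unregister path itself, which appears to be the same trade-off the patch makes with data->ref_work.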