Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

We got slightly different patches removing a double word
in a comment in net/ipv4/raw.c - picked the version from net.

Simple conflict in drivers/net/ethernet/ibm/ibmvnic.c. Use cached
values instead of the VNIC login response buffer (following what
commit 507ebe6444 ("ibmvnic: Fix use-after-free of VNIC login
response buffer") did).
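
For reference, the shape of that resolution is sketched below. This is a
minimal, self-contained C illustration of the pattern only; the struct and
field names are invented for the sketch and are not the actual ibmvnic code.
The idea is that the needed counts are copied into adapter-owned fields while
the login response buffer is still valid, and later code reads the cached
copies rather than the (possibly already freed) response buffer.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative structures only -- not the real ibmvnic definitions. */
struct login_rsp {
	int num_rxadd_subcrqs;
	int num_txsubm_subcrqs;
};

struct adapter {
	struct login_rsp *login_rsp_buf;
	int num_active_rx_scrqs;
	int num_active_tx_scrqs;
};

/* Cache what is needed while the response buffer is still valid... */
static void handle_login_rsp(struct adapter *a)
{
	a->num_active_rx_scrqs = a->login_rsp_buf->num_rxadd_subcrqs;
	a->num_active_tx_scrqs = a->login_rsp_buf->num_txsubm_subcrqs;
	free(a->login_rsp_buf);		/* buffer is released early */
	a->login_rsp_buf = NULL;
}

/* ...so later users read the cached values, never the freed buffer. */
static void setup_queues(const struct adapter *a)
{
	printf("rx queues: %d, tx queues: %d\n",
	       a->num_active_rx_scrqs, a->num_active_tx_scrqs);
}

int main(void)
{
	struct adapter a = { .login_rsp_buf = malloc(sizeof(*a.login_rsp_buf)) };

	if (!a.login_rsp_buf)
		return 1;
	a.login_rsp_buf->num_rxadd_subcrqs = 4;
	a.login_rsp_buf->num_txsubm_subcrqs = 4;
	handle_login_rsp(&a);
	setup_queues(&a);
	return 0;
}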

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1150,7 +1150,7 @@ static void io_prep_async_work(struct io_kiocb *req)
 	io_req_init_async(req);
 	if (req->flags & REQ_F_ISREG) {
-		if (def->hash_reg_file)
+		if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
 			io_wq_hash_work(&req->work, file_inode(req->file));
 	} else {
 		if (def->unbound_nonreg_file)
@@ -1746,7 +1746,8 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 	return __io_req_find_next(req);
 }
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
+				bool twa_signal_ok)
 {
 	struct task_struct *tsk = req->task;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1759,7 +1760,7 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
 	 * will do the job.
 	 */
 	notify = 0;
-	if (!(ctx->flags & IORING_SETUP_SQPOLL))
+	if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
 		notify = TWA_SIGNAL;
 	ret = task_work_add(tsk, cb, notify);
@@ -1819,7 +1820,7 @@ static void io_req_task_queue(struct io_kiocb *req)
 	init_task_work(&req->task_work, io_req_task_submit);
 	percpu_ref_get(&req->ctx->refs);
-	ret = io_req_task_work_add(req, &req->task_work);
+	ret = io_req_task_work_add(req, &req->task_work, true);
 	if (unlikely(ret)) {
 		struct task_struct *tsk;
@@ -2048,6 +2049,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		if (READ_ONCE(req->result) == -EAGAIN) {
 			req->result = 0;
+			req->iopoll_completed = 0;
 			list_move_tail(&req->inflight_entry, &again);
 			continue;
@@ -2293,38 +2295,27 @@ end_req:
 	io_req_complete(req, ret);
 	return false;
 }
-static void io_rw_resubmit(struct callback_head *cb)
-{
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-	struct io_ring_ctx *ctx = req->ctx;
-	int err;
-	err = io_sq_thread_acquire_mm(ctx, req);
-	if (io_resubmit_prep(req, err)) {
-		refcount_inc(&req->refs);
-		io_queue_async_work(req);
-	}
-	percpu_ref_put(&ctx->refs);
-}
 #endif
 static bool io_rw_reissue(struct io_kiocb *req, long res)
 {
 #ifdef CONFIG_BLOCK
 	umode_t mode = file_inode(req->file)->i_mode;
 	int ret;
 	if (!S_ISBLK(mode) && !S_ISREG(mode))
 		return false;
 	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
 		return false;
-	init_task_work(&req->task_work, io_rw_resubmit);
-	percpu_ref_get(&req->ctx->refs);
+	ret = io_sq_thread_acquire_mm(req->ctx, req);
-	ret = io_req_task_work_add(req, &req->task_work);
-	if (!ret)
+	if (io_resubmit_prep(req, ret)) {
+		refcount_inc(&req->refs);
+		io_queue_async_work(req);
 		return true;
+	}
 #endif
 	return false;
 }
@@ -2563,7 +2554,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 		 * IO with EINTR.
 		 */
 		ret = -EINTR;
-		/* fall through */
+		fallthrough;
 	default:
 		kiocb->ki_complete(kiocb, ret, 0);
 	}
@@ -2865,6 +2856,11 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	return iov_iter_count(&req->io->rw.iter);
 }
+static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
+{
+	return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos;
+}
 /*
  * For files that don't have ->read_iter() and ->write_iter(), handle them
  * by looping over ->read() or ->write() manually.
@@ -2900,10 +2896,10 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
 		if (rw == READ) {
 			nr = file->f_op->read(file, iovec.iov_base,
-					      iovec.iov_len, &kiocb->ki_pos);
+					      iovec.iov_len, io_kiocb_ppos(kiocb));
 		} else {
 			nr = file->f_op->write(file, iovec.iov_base,
-					       iovec.iov_len, &kiocb->ki_pos);
+					       iovec.iov_len, io_kiocb_ppos(kiocb));
 		}
 		if (iov_iter_is_bvec(iter))
@@ -3044,7 +3040,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
-	ret = io_req_task_work_add(req, &req->task_work);
+	ret = io_req_task_work_add(req, &req->task_work, true);
 	if (unlikely(ret)) {
 		struct task_struct *tsk;
@@ -3125,6 +3121,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	iov_count = iov_iter_count(iter);
 	io_size = ret;
 	req->result = io_size;
 	ret = 0;
@@ -3137,8 +3134,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;
-	iov_count = iov_iter_count(iter);
-	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
+	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
 	if (unlikely(ret))
 		goto out_free;
@@ -3150,14 +3146,21 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		ret = 0;
 		goto out_free;
 	} else if (ret == -EAGAIN) {
-		if (!force_nonblock)
+		/* IOPOLL retry should happen for io-wq threads */
+		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 			goto done;
+		/* no retry on NONBLOCK marked file */
+		if (req->file->f_flags & O_NONBLOCK)
+			goto done;
+		/* some cases will consume bytes even on error returns */
+		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
 		if (ret)
 			goto out_free;
 		return -EAGAIN;
 	} else if (ret < 0) {
-		goto out_free;
+		/* make sure -ERESTARTSYS -> -EINTR is done */
+		goto done;
 	}
 	/* read it all, or we did blocking attempt. no retry. */
@@ -3241,6 +3244,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	iov_count = iov_iter_count(iter);
 	io_size = ret;
 	req->result = io_size;
@@ -3257,8 +3261,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	    (req->flags & REQ_F_ISREG))
 		goto copy_iov;
-	iov_count = iov_iter_count(iter);
-	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
+	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
 	if (unlikely(ret))
 		goto out_free;
@@ -3290,10 +3293,19 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	 */
 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
 		ret2 = -EAGAIN;
+	/* no retry on NONBLOCK marked file */
+	if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
+		goto done;
 	if (!force_nonblock || ret2 != -EAGAIN) {
+		/* IOPOLL retry should happen for io-wq threads */
+		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
+			goto copy_iov;
+done:
 		kiocb_done(kiocb, ret2, cs);
 	} else {
 copy_iov:
+		/* some cases will consume bytes even on error returns */
+		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
 		if (!ret)
 			return -EAGAIN;
@@ -4566,6 +4578,7 @@ struct io_poll_table {
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
			   __poll_t mask, task_work_func_t func)
 {
+	bool twa_signal_ok;
 	int ret;
 	/* for instances that support it check for an event match first: */
@@ -4580,13 +4593,21 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	init_task_work(&req->task_work, func);
 	percpu_ref_get(&req->ctx->refs);
+	/*
+	 * If we using the signalfd wait_queue_head for this wakeup, then
+	 * it's not safe to use TWA_SIGNAL as we could be recursing on the
+	 * tsk->sighand->siglock on doing the wakeup. Should not be needed
+	 * either, as the normal wakeup will suffice.
+	 */
+	twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
 	 * work gets canceled, so just cancel this request as well instead
 	 * of executing it. We can't safely execute it anyway, as we may not
 	 * have the needed state needed for it anyway.
 	 */
-	ret = io_req_task_work_add(req, &req->task_work);
+	ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok);
 	if (unlikely(ret)) {
 		struct task_struct *tsk;
@@ -4875,12 +4896,20 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	struct async_poll *apoll;
 	struct io_poll_table ipt;
 	__poll_t mask, ret;
+	int rw;
 	if (!req->file || !file_can_poll(req->file))
 		return false;
 	if (req->flags & REQ_F_POLLED)
 		return false;
-	if (!def->pollin && !def->pollout)
+	if (def->pollin)
+		rw = READ;
+	else if (def->pollout)
+		rw = WRITE;
+	else
 		return false;
+	/* if we can't nonblock try, then no point in arming a poll handler */
+	if (!io_file_supports_async(req->file, rw))
+		return false;
 	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
@@ -7311,7 +7340,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 		index = i & IORING_FILE_TABLE_MASK;
 		if (table->files[index]) {
-			file = io_file_from_index(ctx, index);
+			file = table->files[index];
 			err = io_queue_file_removal(data, file);
 			if (err)
 				break;
@@ -7340,6 +7369,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 			table->files[index] = file;
 			err = io_sqe_file_register(ctx, file, i);
 			if (err) {
+				table->files[index] = NULL;
 				fput(file);
 				break;
 			}
@@ -7439,9 +7469,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 {
 	int ret;
-	mmgrab(current->mm);
-	ctx->sqo_mm = current->mm;
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		ret = -EPERM;
 		if (!capable(CAP_SYS_ADMIN))
@@ -7486,10 +7513,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 	return 0;
 err:
 	io_finish_async(ctx);
-	if (ctx->sqo_mm) {
-		mmdrop(ctx->sqo_mm);
-		ctx->sqo_mm = NULL;
-	}
 	return ret;
 }
@@ -8539,6 +8562,9 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	ctx->user = user;
 	ctx->creds = get_current_cred();
+	mmgrab(current->mm);
+	ctx->sqo_mm = current->mm;
 	/*
	 * Account memory _before_ installing the file descriptor. Once
	 * the descriptor is installed, it can get closed at any time. Also